2026-03-09T15:23:49.510 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-03-09T15:23:49.523 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-09T15:23:49.547 INFO:teuthology.run:Config:
archive_path: /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/525
branch: squid
description: orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/off mon_election/classic}
email: null
first_in_suite: false
flavor: default
job_id: '525'
last_in_suite: false
machine_type: vps
name: kyr-2026-03-09_11:23:05-orch-squid-none-default-vps
no_nested_subset: false
os_type: centos
os_version: 9.stream
overrides:
  admin_socket:
    branch: squid
  ansible.cephlab:
    branch: main
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      timezone: UTC
  ceph:
    conf:
      global:
        mon election default strategy: 1
      mgr:
        debug mgr: 20
        debug ms: 1
        mgr/cephadm/use_agent: false
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_STRAY_DAEMON
    - CEPHADM_FAILED_DAEMON
    - CEPHADM_AGENT_DOWN
    log-only-match:
    - CEPHADM_
    sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  install:
    ceph:
      flavor: default
      sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
    extra_system_packages:
      deb:
      - python3-xmltodict
      - python3-jmespath
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-xmltodict
      - python3-jmespath
  workunit:
    branch: tt-squid
    sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
owner: kyr
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - mon.a
  - mon.c
  - mgr.y
  - osd.0
  - osd.1
  - osd.2
  - osd.3
  - client.0
  - node-exporter.a
  - alertmanager.a
- - mon.b
  - mgr.x
  - osd.4
  - osd.5
  - osd.6
  - osd.7
  - client.1
  - prometheus.a
  - grafana.a
  - node-exporter.b
seed: 3443
sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
sleep_before_teardown: 0
subset: 1/64
suite: orch
suite_branch: tt-squid
suite_path: /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa
suite_relpath: qa
suite_repo: https://github.com/kshtsk/ceph.git
suite_sha1: 569c3e99c9b32a51b4eaf08731c728f4513ed589
targets:
  vm05.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBHf6pdEw+la//CXD7ndRUa7GlGea36E0JYuaLggwR2SeBQSKFkcj03MbwMEwKO/yWYqt4CzHcFN1g4KGyzfHcN8=
  vm09.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNh2k90dSGgr4b4ANWGcD09a/z2GoK816e5VhCRKhCEytCYFJoqIVUaQQ4v/xvT0gr7W/3Q7uMW/2H/kBCSgR3w=
tasks:
- cephadm:
    cephadm_branch: v17.2.0
    cephadm_git_url: https://github.com/ceph/ceph
    image: quay.io/ceph/ceph:v17.2.0
- cephadm.shell:
    mon.a:
    - ceph config set mgr mgr/cephadm/use_repo_digest false --force
- cephadm.shell:
    env:
    - sha1
    mon.a:
    - radosgw-admin realm create --rgw-realm=r --default
    - radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
    - radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default
    - radosgw-admin period update --rgw-realm=r --commit
    - ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000
    - ceph orch apply rgw smpl
    - ceph osd pool create foo
    - rbd pool init foo
    - ceph orch apply iscsi foo u p
    - sleep 120
    - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
    - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
    - ceph config set global log_to_journald false --force
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- cephadm.shell:
    env:
    - sha1
    mon.a:
    - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done
    - ceph orch ps
    - ceph versions
    - echo "wait for servicemap items w/ changing names to refresh"
    - sleep 60
    - ceph orch ps
    - ceph versions
    - ceph orch upgrade status
    - ceph health detail
    - ceph versions | jq -e '.overall | length == 1'
    - ceph versions | jq -e '.overall | keys' | grep $sha1
    - ceph orch ls | grep '^osd '
- cephadm.shell:
    mon.a:
    - ceph orch upgrade ls
    - ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0
    - ceph orch upgrade ls --image quay.io/ceph/ceph --tags | grep v16.2.2
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/clyso/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-03-09_11:23:05
tube: vps
user: kyr
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.611473
2026-03-09T15:23:49.548 INFO:teuthology.run:suite_path is set to /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa; will attempt to use it
2026-03-09T15:23:49.548 INFO:teuthology.run:Found tasks at /home/teuthos/src/github.com_kshtsk_ceph_569c3e99c9b32a51b4eaf08731c728f4513ed589/qa/tasks
2026-03-09T15:23:49.548 INFO:teuthology.run_tasks:Running task internal.check_packages...
2026-03-09T15:23:49.548 INFO:teuthology.task.internal:Checking packages...
2026-03-09T15:23:49.548 INFO:teuthology.task.internal:Checking packages for os_type 'centos', flavor 'default' and ceph hash 'e911bdebe5c8faa3800735d1568fcdca65db60df'
2026-03-09T15:23:49.549 WARNING:teuthology.packaging:More than one of ref, tag, branch, or sha1 supplied; using branch
2026-03-09T15:23:49.549 INFO:teuthology.packaging:ref: None
2026-03-09T15:23:49.549 INFO:teuthology.packaging:tag: None
2026-03-09T15:23:49.549 INFO:teuthology.packaging:branch: squid
2026-03-09T15:23:49.549 INFO:teuthology.packaging:sha1: e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T15:23:49.549 DEBUG:teuthology.packaging:Querying https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid
2026-03-09T15:23:50.253 INFO:teuthology.task.internal:Found packages for ceph version 19.2.3-678.ge911bdeb
2026-03-09T15:23:50.254 INFO:teuthology.run_tasks:Running task internal.buildpackages_prep...
2026-03-09T15:23:50.255 INFO:teuthology.task.internal:no buildpackages task found
2026-03-09T15:23:50.255 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-03-09T15:23:50.255 INFO:teuthology.task.internal:Saving configuration
2026-03-09T15:23:50.260 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-03-09T15:23:50.261 INFO:teuthology.task.internal.check_lock:Checking locks...
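The package check above resolves the squid branch through the Shaman build index before anything is installed. A minimal sketch of the same lookup from a shell, assuming the response is a JSON array of ready builds carrying sha1 and url fields (curl and jq required):

    # ask Shaman which ready centos/9 x86_64 'default' builds exist for the squid ref
    curl -s 'https://shaman.ceph.com/api/search?status=ready&project=ceph&flavor=default&distros=centos%2F9%2Fx86_64&ref=squid' \
      | jq -r '.[0] | "\(.sha1) \(.url)"'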
2026-03-09T15:23:50.269 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm05.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/525', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 15:22:35.378306', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:05', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBHf6pdEw+la//CXD7ndRUa7GlGea36E0JYuaLggwR2SeBQSKFkcj03MbwMEwKO/yWYqt4CzHcFN1g4KGyzfHcN8='} 2026-03-09T15:23:50.275 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm09.local', 'description': '/archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/525', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'centos', 'os_version': '9.stream', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-03-09 15:22:35.377677', 'locked_by': 'kyr', 'mac_address': '52:55:00:00:00:09', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNh2k90dSGgr4b4ANWGcD09a/z2GoK816e5VhCRKhCEytCYFJoqIVUaQQ4v/xvT0gr7W/3Q7uMW/2H/kBCSgR3w='} 2026-03-09T15:23:50.275 INFO:teuthology.run_tasks:Running task internal.add_remotes... 2026-03-09T15:23:50.276 INFO:teuthology.task.internal:roles: ubuntu@vm05.local - ['mon.a', 'mon.c', 'mgr.y', 'osd.0', 'osd.1', 'osd.2', 'osd.3', 'client.0', 'node-exporter.a', 'alertmanager.a'] 2026-03-09T15:23:50.276 INFO:teuthology.task.internal:roles: ubuntu@vm09.local - ['mon.b', 'mgr.x', 'osd.4', 'osd.5', 'osd.6', 'osd.7', 'client.1', 'prometheus.a', 'grafana.a', 'node-exporter.b'] 2026-03-09T15:23:50.276 INFO:teuthology.run_tasks:Running task console_log... 2026-03-09T15:23:50.283 DEBUG:teuthology.task.console_log:vm05 does not support IPMI; excluding 2026-03-09T15:23:50.289 DEBUG:teuthology.task.console_log:vm09 does not support IPMI; excluding 2026-03-09T15:23:50.289 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7f4318c7e170>, signals=[15]) 2026-03-09T15:23:50.289 INFO:teuthology.run_tasks:Running task internal.connect... 2026-03-09T15:23:50.290 INFO:teuthology.task.internal:Opening connections... 2026-03-09T15:23:50.290 DEBUG:teuthology.task.internal:connecting to ubuntu@vm05.local 2026-03-09T15:23:50.291 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm05.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-09T15:23:50.355 DEBUG:teuthology.task.internal:connecting to ubuntu@vm09.local 2026-03-09T15:23:50.356 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-09T15:23:50.417 INFO:teuthology.run_tasks:Running task internal.push_inventory... 
2026-03-09T15:23:50.418 DEBUG:teuthology.orchestra.run.vm05:> uname -m
2026-03-09T15:23:50.466 INFO:teuthology.orchestra.run.vm05.stdout:x86_64
2026-03-09T15:23:50.466 DEBUG:teuthology.orchestra.run.vm05:> cat /etc/os-release
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:NAME="CentOS Stream"
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:VERSION="9"
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:ID="centos"
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:ID_LIKE="rhel fedora"
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:VERSION_ID="9"
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:PLATFORM_ID="platform:el9"
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:ANSI_COLOR="0;31"
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:LOGO="fedora-logo-icon"
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:HOME_URL="https://centos.org/"
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-09T15:23:50.527 INFO:teuthology.orchestra.run.vm05.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-09T15:23:50.527 INFO:teuthology.lock.ops:Updating vm05.local on lock server
2026-03-09T15:23:50.532 DEBUG:teuthology.orchestra.run.vm09:> uname -m
2026-03-09T15:23:50.548 INFO:teuthology.orchestra.run.vm09.stdout:x86_64
2026-03-09T15:23:50.548 DEBUG:teuthology.orchestra.run.vm09:> cat /etc/os-release
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:NAME="CentOS Stream"
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:VERSION="9"
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:ID="centos"
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:ID_LIKE="rhel fedora"
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:VERSION_ID="9"
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:PLATFORM_ID="platform:el9"
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:PRETTY_NAME="CentOS Stream 9"
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:ANSI_COLOR="0;31"
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:LOGO="fedora-logo-icon"
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:CPE_NAME="cpe:/o:centos:centos:9"
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:HOME_URL="https://centos.org/"
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:BUG_REPORT_URL="https://issues.redhat.com/"
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux 9"
2026-03-09T15:23:50.602 INFO:teuthology.orchestra.run.vm09.stdout:REDHAT_SUPPORT_PRODUCT_VERSION="CentOS Stream"
2026-03-09T15:23:50.602 INFO:teuthology.lock.ops:Updating vm09.local on lock server
2026-03-09T15:23:50.606 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-03-09T15:23:50.608 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-03-09T15:23:50.609 INFO:teuthology.task.internal:Checking for old test directory...
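The inventory pushed to the lock server above is derived from just those two probes, uname -m and /etc/os-release. The same facts can be collected by hand with a short sketch:

    # architecture plus distro id/version, as recorded for each target
    arch=$(uname -m)
    . /etc/os-release
    echo "os_type=${ID} os_version=${VERSION_ID} arch=${arch}"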
2026-03-09T15:23:50.609 DEBUG:teuthology.orchestra.run.vm05:> test '!' -e /home/ubuntu/cephtest 2026-03-09T15:23:50.610 DEBUG:teuthology.orchestra.run.vm09:> test '!' -e /home/ubuntu/cephtest 2026-03-09T15:23:50.658 INFO:teuthology.run_tasks:Running task internal.check_ceph_data... 2026-03-09T15:23:50.659 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph... 2026-03-09T15:23:50.659 DEBUG:teuthology.orchestra.run.vm05:> test -z $(ls -A /var/lib/ceph) 2026-03-09T15:23:50.665 DEBUG:teuthology.orchestra.run.vm09:> test -z $(ls -A /var/lib/ceph) 2026-03-09T15:23:50.679 INFO:teuthology.orchestra.run.vm05.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-09T15:23:50.714 INFO:teuthology.orchestra.run.vm09.stderr:ls: cannot access '/var/lib/ceph': No such file or directory 2026-03-09T15:23:50.714 INFO:teuthology.run_tasks:Running task internal.vm_setup... 2026-03-09T15:23:50.723 DEBUG:teuthology.orchestra.run.vm05:> test -e /ceph-qa-ready 2026-03-09T15:23:50.738 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T15:23:51.003 DEBUG:teuthology.orchestra.run.vm09:> test -e /ceph-qa-ready 2026-03-09T15:23:51.017 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T15:23:51.210 INFO:teuthology.run_tasks:Running task internal.base... 2026-03-09T15:23:51.211 INFO:teuthology.task.internal:Creating test directory... 2026-03-09T15:23:51.212 DEBUG:teuthology.orchestra.run.vm05:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-09T15:23:51.214 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest 2026-03-09T15:23:51.228 INFO:teuthology.run_tasks:Running task internal.archive_upload... 2026-03-09T15:23:51.230 INFO:teuthology.run_tasks:Running task internal.archive... 2026-03-09T15:23:51.231 INFO:teuthology.task.internal:Creating archive directory... 2026-03-09T15:23:51.231 DEBUG:teuthology.orchestra.run.vm05:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-09T15:23:51.269 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive 2026-03-09T15:23:51.287 INFO:teuthology.run_tasks:Running task internal.coredump... 2026-03-09T15:23:51.288 INFO:teuthology.task.internal:Enabling coredump saving... 
2026-03-09T15:23:51.288 DEBUG:teuthology.orchestra.run.vm05:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-09T15:23:51.338 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T15:23:51.338 DEBUG:teuthology.orchestra.run.vm09:> test -f /run/.containerenv -o -f /.dockerenv
2026-03-09T15:23:51.352 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T15:23:51.352 DEBUG:teuthology.orchestra.run.vm05:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-09T15:23:51.381 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-03-09T15:23:51.403 INFO:teuthology.orchestra.run.vm05.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T15:23:51.412 INFO:teuthology.orchestra.run.vm05.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T15:23:51.417 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T15:23:51.426 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-03-09T15:23:51.427 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-03-09T15:23:51.429 INFO:teuthology.task.internal:Configuring sudo...
2026-03-09T15:23:51.429 DEBUG:teuthology.orchestra.run.vm05:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-09T15:23:51.455 DEBUG:teuthology.orchestra.run.vm09:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-03-09T15:23:51.492 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-03-09T15:23:51.495 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
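The core_pattern setup a few entries above amounts to the following on a single host; this is a sketch of the same commands, with /home/ubuntu/cephtest as the test directory:

    testdir=/home/ubuntu/cephtest
    install -d -m0755 -- "$testdir/archive/coredump"
    # route kernel core dumps into the archive, named by timestamp and pid
    sudo sysctl -w kernel.core_pattern="$testdir/archive/coredump/%t.%p.core"
    # persist the setting across reboots
    echo kernel.core_pattern="$testdir/archive/coredump/%t.%p.core" | sudo tee -a /etc/sysctl.conf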
2026-03-09T15:23:51.495 DEBUG:teuthology.orchestra.run.vm05:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-09T15:23:51.521 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-03-09T15:23:51.551 DEBUG:teuthology.orchestra.run.vm05:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T15:23:51.597 DEBUG:teuthology.orchestra.run.vm05:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T15:23:51.651 DEBUG:teuthology.orchestra.run.vm05:> set -ex
2026-03-09T15:23:51.651 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-09T15:23:51.709 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T15:23:51.736 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T15:23:51.795 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-03-09T15:23:51.795 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-03-09T15:23:51.858 DEBUG:teuthology.orchestra.run.vm05:> sudo service rsyslog restart
2026-03-09T15:23:51.860 DEBUG:teuthology.orchestra.run.vm09:> sudo service rsyslog restart
2026-03-09T15:23:51.888 INFO:teuthology.orchestra.run.vm05.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T15:23:51.931 INFO:teuthology.orchestra.run.vm09.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T15:23:52.339 INFO:teuthology.run_tasks:Running task internal.timer...
2026-03-09T15:23:52.341 INFO:teuthology.task.internal:Starting timer...
2026-03-09T15:23:52.341 INFO:teuthology.run_tasks:Running task pcp...
2026-03-09T15:23:52.344 INFO:teuthology.run_tasks:Running task selinux...
2026-03-09T15:23:52.346 INFO:teuthology.task.selinux:Excluding vm05: VMs are not yet supported
2026-03-09T15:23:52.346 INFO:teuthology.task.selinux:Excluding vm09: VMs are not yet supported
2026-03-09T15:23:52.346 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-03-09T15:23:52.346 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-03-09T15:23:52.346 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-03-09T15:23:52.346 INFO:teuthology.run_tasks:Running task ansible.cephlab...
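The rules written to /etc/rsyslog.d/80-cephtest.conf a few entries above arrive over the stdin of the dd command and are not echoed in the log. A hypothetical rule file of the kind this step installs, routing kernel messages and everything else into the archive directory, might look like the following (an illustration under that assumption, not the exact content teuthology writes):

    sudo tee /etc/rsyslog.d/80-cephtest.conf <<'EOF' >/dev/null
    kern.* /home/ubuntu/cephtest/archive/syslog/kern.log
    *.*;kern.none /home/ubuntu/cephtest/archive/syslog/misc.log
    EOF
    sudo systemctl restart rsyslog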
2026-03-09T15:23:52.348 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'timezone': 'UTC'}} 2026-03-09T15:23:52.348 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/ceph/ceph-cm-ansible.git 2026-03-09T15:23:52.349 INFO:teuthology.repo_utils:Fetching github.com_ceph_ceph-cm-ansible_main from origin 2026-03-09T15:23:53.043 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main to origin/main 2026-03-09T15:23:53.049 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}] 2026-03-09T15:23:53.049 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "timezone": "UTC"}' -i /tmp/teuth_ansible_inventoryepjqqnys --limit vm05.local,vm09.local /home/teuthos/src/github.com_ceph_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs 2026-03-09T15:25:56.746 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm05.local'), Remote(name='ubuntu@vm09.local')] 2026-03-09T15:25:56.746 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm05.local' 2026-03-09T15:25:56.747 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm05.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-09T15:25:56.810 DEBUG:teuthology.orchestra.run.vm05:> true 2026-03-09T15:25:56.888 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm05.local' 2026-03-09T15:25:56.889 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm09.local' 2026-03-09T15:25:56.889 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60} 2026-03-09T15:25:56.957 DEBUG:teuthology.orchestra.run.vm09:> true 2026-03-09T15:25:57.037 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm09.local' 2026-03-09T15:25:57.037 INFO:teuthology.run_tasks:Running task clock... 2026-03-09T15:25:57.040 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew... 
2026-03-09T15:25:57.040 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-09T15:25:57.040 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-09T15:25:57.042 INFO:teuthology.orchestra.run:Running command with timeout 360 2026-03-09T15:25:57.042 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-03-09T15:25:57.075 INFO:teuthology.orchestra.run.vm05.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-09T15:25:57.098 INFO:teuthology.orchestra.run.vm05.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-09T15:25:57.118 INFO:teuthology.orchestra.run.vm09.stderr:Failed to stop ntp.service: Unit ntp.service not loaded. 2026-03-09T15:25:57.131 INFO:teuthology.orchestra.run.vm05.stderr:sudo: ntpd: command not found 2026-03-09T15:25:57.135 INFO:teuthology.orchestra.run.vm09.stderr:Failed to stop ntpd.service: Unit ntpd.service not loaded. 2026-03-09T15:25:57.143 INFO:teuthology.orchestra.run.vm05.stdout:506 Cannot talk to daemon 2026-03-09T15:25:57.162 INFO:teuthology.orchestra.run.vm05.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-09T15:25:57.164 INFO:teuthology.orchestra.run.vm09.stderr:sudo: ntpd: command not found 2026-03-09T15:25:57.179 INFO:teuthology.orchestra.run.vm09.stdout:506 Cannot talk to daemon 2026-03-09T15:25:57.184 INFO:teuthology.orchestra.run.vm05.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-09T15:25:57.197 INFO:teuthology.orchestra.run.vm09.stderr:Failed to start ntp.service: Unit ntp.service not found. 2026-03-09T15:25:57.214 INFO:teuthology.orchestra.run.vm09.stderr:Failed to start ntpd.service: Unit ntpd.service not found. 2026-03-09T15:25:57.247 INFO:teuthology.orchestra.run.vm05.stderr:bash: line 1: ntpq: command not found 2026-03-09T15:25:57.250 INFO:teuthology.orchestra.run.vm05.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-09T15:25:57.250 INFO:teuthology.orchestra.run.vm05.stdout:=============================================================================== 2026-03-09T15:25:57.250 INFO:teuthology.orchestra.run.vm05.stdout:^? basilisk.mybb.de 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T15:25:57.250 INFO:teuthology.orchestra.run.vm05.stdout:^? s7.vonderste.in 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T15:25:57.250 INFO:teuthology.orchestra.run.vm05.stdout:^? www.kernfusion.at 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T15:25:57.250 INFO:teuthology.orchestra.run.vm05.stdout:^? 
mail.klausen.dk 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T15:25:57.267 INFO:teuthology.orchestra.run.vm09.stderr:bash: line 1: ntpq: command not found 2026-03-09T15:25:57.269 INFO:teuthology.orchestra.run.vm09.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample 2026-03-09T15:25:57.270 INFO:teuthology.orchestra.run.vm09.stdout:=============================================================================== 2026-03-09T15:25:57.270 INFO:teuthology.orchestra.run.vm09.stdout:^? basilisk.mybb.de 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T15:25:57.270 INFO:teuthology.orchestra.run.vm09.stdout:^? s7.vonderste.in 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T15:25:57.270 INFO:teuthology.orchestra.run.vm09.stdout:^? www.kernfusion.at 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T15:25:57.270 INFO:teuthology.orchestra.run.vm09.stdout:^? mail.klausen.dk 0 6 0 - +0ns[ +0ns] +/- 0ns 2026-03-09T15:25:57.270 INFO:teuthology.run_tasks:Running task cephadm... 2026-03-09T15:25:57.320 INFO:tasks.cephadm:Config: {'cephadm_branch': 'v17.2.0', 'cephadm_git_url': 'https://github.com/ceph/ceph', 'image': 'quay.io/ceph/ceph:v17.2.0', 'conf': {'global': {'mon election default strategy': 1}, 'mgr': {'debug mgr': 20, 'debug ms': 1, 'mgr/cephadm/use_agent': False}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_STRAY_DAEMON', 'CEPHADM_FAILED_DAEMON', 'CEPHADM_AGENT_DOWN'], 'log-only-match': ['CEPHADM_'], 'sha1': 'e911bdebe5c8faa3800735d1568fcdca65db60df'} 2026-03-09T15:25:57.320 INFO:tasks.cephadm:Cluster image is quay.io/ceph/ceph:v17.2.0 2026-03-09T15:25:57.320 INFO:tasks.cephadm:Cluster fsid is 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:25:57.321 INFO:tasks.cephadm:Choosing monitor IPs and ports... 2026-03-09T15:25:57.321 INFO:tasks.cephadm:Monitor IPs: {'mon.a': '192.168.123.105', 'mon.c': '[v2:192.168.123.105:3301,v1:192.168.123.105:6790]', 'mon.b': '192.168.123.109'} 2026-03-09T15:25:57.321 INFO:tasks.cephadm:First mon is mon.a on vm05 2026-03-09T15:25:57.321 INFO:tasks.cephadm:First mgr is y 2026-03-09T15:25:57.321 INFO:tasks.cephadm:Normalizing hostnames... 2026-03-09T15:25:57.321 DEBUG:teuthology.orchestra.run.vm05:> sudo hostname $(hostname -s) 2026-03-09T15:25:57.357 DEBUG:teuthology.orchestra.run.vm09:> sudo hostname $(hostname -s) 2026-03-09T15:25:57.389 INFO:tasks.cephadm:Downloading cephadm (repo https://github.com/ceph/ceph ref v17.2.0)... 2026-03-09T15:25:57.390 DEBUG:teuthology.orchestra.run.vm05:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-09T15:25:57.639 INFO:teuthology.orchestra.run.vm05.stdout:-rw-r--r--. 1 ubuntu ubuntu 320521 Mar 9 15:25 /home/ubuntu/cephtest/cephadm 2026-03-09T15:25:57.639 DEBUG:teuthology.orchestra.run.vm09:> curl --silent https://raw.githubusercontent.com/ceph/ceph/v17.2.0/src/cephadm/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-03-09T15:25:57.726 INFO:teuthology.orchestra.run.vm09.stdout:-rw-r--r--. 
1 ubuntu ubuntu 320521 Mar 9 15:25 /home/ubuntu/cephtest/cephadm 2026-03-09T15:25:57.726 DEBUG:teuthology.orchestra.run.vm05:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-09T15:25:57.749 DEBUG:teuthology.orchestra.run.vm09:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-03-09T15:25:57.775 INFO:tasks.cephadm:Pulling image quay.io/ceph/ceph:v17.2.0 on all hosts... 2026-03-09T15:25:57.775 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull 2026-03-09T15:25:57.791 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 pull 2026-03-09T15:25:58.017 INFO:teuthology.orchestra.run.vm09.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0... 2026-03-09T15:25:58.024 INFO:teuthology.orchestra.run.vm05.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0... 2026-03-09T15:26:20.248 INFO:teuthology.orchestra.run.vm09.stdout:{ 2026-03-09T15:26:20.248 INFO:teuthology.orchestra.run.vm09.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)", 2026-03-09T15:26:20.248 INFO:teuthology.orchestra.run.vm09.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", 2026-03-09T15:26:20.248 INFO:teuthology.orchestra.run.vm09.stdout: "repo_digests": [ 2026-03-09T15:26:20.248 INFO:teuthology.orchestra.run.vm09.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", 2026-03-09T15:26:20.248 INFO:teuthology.orchestra.run.vm09.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667" 2026-03-09T15:26:20.248 INFO:teuthology.orchestra.run.vm09.stdout: ] 2026-03-09T15:26:20.248 INFO:teuthology.orchestra.run.vm09.stdout:} 2026-03-09T15:26:21.229 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:26:21.229 INFO:teuthology.orchestra.run.vm05.stdout: "ceph_version": "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)", 2026-03-09T15:26:21.229 INFO:teuthology.orchestra.run.vm05.stdout: "image_id": "e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9", 2026-03-09T15:26:21.229 INFO:teuthology.orchestra.run.vm05.stdout: "repo_digests": [ 2026-03-09T15:26:21.229 INFO:teuthology.orchestra.run.vm05.stdout: "quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a", 2026-03-09T15:26:21.229 INFO:teuthology.orchestra.run.vm05.stdout: "quay.io/ceph/ceph@sha256:cb4d698cb769b6aba05bf6ef04f41a7fe694160140347576e13bd9348514b667" 2026-03-09T15:26:21.229 INFO:teuthology.orchestra.run.vm05.stdout: ] 2026-03-09T15:26:21.229 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:26:21.248 DEBUG:teuthology.orchestra.run.vm05:> sudo mkdir -p /etc/ceph 2026-03-09T15:26:21.283 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /etc/ceph 2026-03-09T15:26:21.316 DEBUG:teuthology.orchestra.run.vm05:> sudo chmod 777 /etc/ceph 2026-03-09T15:26:21.352 DEBUG:teuthology.orchestra.run.vm09:> sudo chmod 777 /etc/ceph 2026-03-09T15:26:21.384 INFO:tasks.cephadm:Writing seed config... 
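The two steps above, fetching the standalone cephadm script for the start version and pre-pulling its container image on every host, reduce to the following sketch on one host:

    ref=v17.2.0
    curl --silent "https://raw.githubusercontent.com/ceph/ceph/${ref}/src/cephadm/cephadm" > ./cephadm
    # sanity-check the download before marking it executable
    test -s ./cephadm && test "$(stat -c%s ./cephadm)" -gt 1000 && chmod +x ./cephadm
    sudo ./cephadm --image "quay.io/ceph/ceph:${ref}" pull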
2026-03-09T15:26:21.384 INFO:tasks.cephadm: override: [global] mon election default strategy = 1
2026-03-09T15:26:21.384 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-03-09T15:26:21.384 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-03-09T15:26:21.384 INFO:tasks.cephadm: override: [mgr] mgr/cephadm/use_agent = False
2026-03-09T15:26:21.384 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-03-09T15:26:21.384 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-03-09T15:26:21.384 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-03-09T15:26:21.384 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-03-09T15:26:21.384 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-03-09T15:26:21.384 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-03-09T15:26:21.385 DEBUG:teuthology.orchestra.run.vm05:> set -ex
2026-03-09T15:26:21.385 DEBUG:teuthology.orchestra.run.vm05:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-03-09T15:26:21.411 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000

# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd

# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true

# adjust warnings
mon max pg per osd = 10000        # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false

# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off

# tests delete pools
mon allow pool delete = true
fsid = 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea
mon election default strategy = 1

[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true

# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = true
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000

[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1
mgr/cephadm/use_agent = False

[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10

# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660     # 11m
auth service ticket ttl = 240 # 4m

# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20

[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
2026-03-09T15:26:21.411 DEBUG:teuthology.orchestra.run.vm05:mon.a> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.a.service
2026-03-09T15:26:21.453 DEBUG:teuthology.orchestra.run.vm05:mgr.y> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.y.service
2026-03-09T15:26:21.495 INFO:tasks.cephadm:Bootstrapping...
2026-03-09T15:26:21.495 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 -v bootstrap --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-id a --mgr-id y --orphan-initial-daemons --skip-monitoring-stack --mon-ip 192.168.123.105 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-03-09T15:26:21.668 INFO:teuthology.orchestra.run.vm05.stderr:--------------------------------------------------------------------------------
2026-03-09T15:26:21.669 INFO:teuthology.orchestra.run.vm05.stderr:cephadm ['--image', 'quay.io/ceph/ceph:v17.2.0', '-v', 'bootstrap', '--fsid', '452f6a00-1bcc-11f1-a1ee-7f1a2af01dea', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-id', 'a', '--mgr-id', 'y', '--orphan-initial-daemons', '--skip-monitoring-stack', '--mon-ip', '192.168.123.105', '--skip-admin-label']
2026-03-09T15:26:21.688 INFO:teuthology.orchestra.run.vm05.stderr:/bin/podman: 5.8.0
2026-03-09T15:26:21.695 INFO:teuthology.orchestra.run.vm05.stderr:Verifying podman|docker is present...
2026-03-09T15:26:21.713 INFO:teuthology.orchestra.run.vm05.stderr:/bin/podman: 5.8.0
2026-03-09T15:26:21.719 INFO:teuthology.orchestra.run.vm05.stderr:Verifying lvm2 is present...
2026-03-09T15:26:21.719 INFO:teuthology.orchestra.run.vm05.stderr:Verifying time synchronization is in place...
2026-03-09T15:26:21.726 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory
2026-03-09T15:26:21.734 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: inactive
2026-03-09T15:26:21.743 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: enabled
2026-03-09T15:26:21.749 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: active
2026-03-09T15:26:21.750 INFO:teuthology.orchestra.run.vm05.stderr:Unit chronyd.service is enabled and running
2026-03-09T15:26:21.750 INFO:teuthology.orchestra.run.vm05.stderr:Repeating the final host check...
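Stripped of the test-specific flags (--orphan-initial-daemons, --skip-monitoring-stack, --skip-admin-label, the explicit --mon-id/--mgr-id and the output paths), the bootstrap invocation above reduces to roughly this sketch:

    sudo ./cephadm --image quay.io/ceph/ceph:v17.2.0 bootstrap \
        --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea \
        --config ./seed.ceph.conf \
        --mon-ip 192.168.123.105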
2026-03-09T15:26:21.774 INFO:teuthology.orchestra.run.vm05.stderr:/bin/podman: 5.8.0 2026-03-09T15:26:21.777 INFO:teuthology.orchestra.run.vm05.stderr:podman (/bin/podman) version 5.8.0 is present 2026-03-09T15:26:21.777 INFO:teuthology.orchestra.run.vm05.stderr:systemctl is present 2026-03-09T15:26:21.777 INFO:teuthology.orchestra.run.vm05.stderr:lvcreate is present 2026-03-09T15:26:21.783 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: Failed to get unit file state for chrony.service: No such file or directory 2026-03-09T15:26:21.789 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: inactive 2026-03-09T15:26:21.795 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: enabled 2026-03-09T15:26:21.801 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: active 2026-03-09T15:26:21.801 INFO:teuthology.orchestra.run.vm05.stderr:Unit chronyd.service is enabled and running 2026-03-09T15:26:21.801 INFO:teuthology.orchestra.run.vm05.stderr:Host looks OK 2026-03-09T15:26:21.801 INFO:teuthology.orchestra.run.vm05.stderr:Cluster fsid: 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:26:21.802 INFO:teuthology.orchestra.run.vm05.stderr:Acquiring lock 140227841117248 on /run/cephadm/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea.lock 2026-03-09T15:26:21.802 INFO:teuthology.orchestra.run.vm05.stderr:Lock 140227841117248 acquired on /run/cephadm/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea.lock 2026-03-09T15:26:21.802 INFO:teuthology.orchestra.run.vm05.stderr:Verifying IP 192.168.123.105 port 3300 ... 2026-03-09T15:26:21.802 INFO:teuthology.orchestra.run.vm05.stderr:Verifying IP 192.168.123.105 port 6789 ... 2026-03-09T15:26:21.802 INFO:teuthology.orchestra.run.vm05.stderr:Base mon IP is 192.168.123.105, final addrv is [v2:192.168.123.105:3300,v1:192.168.123.105:6789] 2026-03-09T15:26:21.805 INFO:teuthology.orchestra.run.vm05.stderr:/sbin/ip: default via 192.168.123.1 dev eth0 proto dhcp src 192.168.123.105 metric 100 2026-03-09T15:26:21.805 INFO:teuthology.orchestra.run.vm05.stderr:/sbin/ip: 192.168.123.0/24 dev eth0 proto kernel scope link src 192.168.123.105 metric 100 2026-03-09T15:26:21.811 INFO:teuthology.orchestra.run.vm05.stderr:/sbin/ip: ::1 dev lo proto kernel metric 256 pref medium 2026-03-09T15:26:21.811 INFO:teuthology.orchestra.run.vm05.stderr:/sbin/ip: fe80::/64 dev eth0 proto kernel metric 1024 pref medium 2026-03-09T15:26:21.814 INFO:teuthology.orchestra.run.vm05.stderr:/sbin/ip: 1: lo: mtu 65536 state UNKNOWN qlen 1000 2026-03-09T15:26:21.814 INFO:teuthology.orchestra.run.vm05.stderr:/sbin/ip: inet6 ::1/128 scope host 2026-03-09T15:26:21.814 INFO:teuthology.orchestra.run.vm05.stderr:/sbin/ip: valid_lft forever preferred_lft forever 2026-03-09T15:26:21.814 INFO:teuthology.orchestra.run.vm05.stderr:/sbin/ip: 2: eth0: mtu 1500 state UP qlen 1000 2026-03-09T15:26:21.814 INFO:teuthology.orchestra.run.vm05.stderr:/sbin/ip: inet6 fe80::5055:ff:fe00:5/64 scope link noprefixroute 2026-03-09T15:26:21.814 INFO:teuthology.orchestra.run.vm05.stderr:/sbin/ip: valid_lft forever preferred_lft forever 2026-03-09T15:26:21.815 INFO:teuthology.orchestra.run.vm05.stderr:Mon IP `192.168.123.105` is in CIDR network `192.168.123.0/24` 2026-03-09T15:26:21.815 INFO:teuthology.orchestra.run.vm05.stderr:- internal network (--cluster-network) has not been provided, OSD replication will default to the public_network 2026-03-09T15:26:21.816 INFO:teuthology.orchestra.run.vm05.stderr:Pulling container image quay.io/ceph/ceph:v17.2.0... 
2026-03-09T15:26:21.844 INFO:teuthology.orchestra.run.vm05.stderr:/bin/podman: Trying to pull quay.io/ceph/ceph:v17.2.0... 2026-03-09T15:26:23.071 INFO:teuthology.orchestra.run.vm05.stderr:/bin/podman: Getting image source signatures 2026-03-09T15:26:23.072 INFO:teuthology.orchestra.run.vm05.stderr:/bin/podman: Copying blob sha256:33ca8fff7868c4dc0c11e09bca97c720eb9cfbab7221216754367dd8de70388a 2026-03-09T15:26:23.072 INFO:teuthology.orchestra.run.vm05.stderr:/bin/podman: Copying blob sha256:89b4a75bc2d8500f15463747507c9623df43886c134463e7f0527e70900e7a7b 2026-03-09T15:26:23.072 INFO:teuthology.orchestra.run.vm05.stderr:/bin/podman: Copying blob sha256:a70843738bb77e1ab9c1f85969ebdfa55f178e746be081d1cb4f94011f69eb7c 2026-03-09T15:26:23.072 INFO:teuthology.orchestra.run.vm05.stderr:/bin/podman: Copying blob sha256:c32ab78b488d0c72f64eded765c0cf6b5bf2c75dab66cb62a9d367fa6ec42513 2026-03-09T15:26:23.072 INFO:teuthology.orchestra.run.vm05.stderr:/bin/podman: Copying blob sha256:599d07cb321ff0a3c82224e1138fc685793fa69b93ed5780415751a5f7e4b8c2 2026-03-09T15:26:23.072 INFO:teuthology.orchestra.run.vm05.stderr:/bin/podman: Copying config sha256:e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 2026-03-09T15:26:23.075 INFO:teuthology.orchestra.run.vm05.stderr:/bin/podman: Writing manifest to image destination 2026-03-09T15:26:23.080 INFO:teuthology.orchestra.run.vm05.stderr:/bin/podman: e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 2026-03-09T15:26:23.243 INFO:teuthology.orchestra.run.vm05.stderr:ceph: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable) 2026-03-09T15:26:23.274 INFO:teuthology.orchestra.run.vm05.stderr:Ceph version: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable) 2026-03-09T15:26:23.274 INFO:teuthology.orchestra.run.vm05.stderr:Extracting ceph user uid/gid from container image... 2026-03-09T15:26:23.361 INFO:teuthology.orchestra.run.vm05.stderr:stat: 167 167 2026-03-09T15:26:23.390 INFO:teuthology.orchestra.run.vm05.stderr:Creating initial keys... 2026-03-09T15:26:23.494 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph-authtool: AQCf5q5p4L1wHRAA8YOqW7FnIgIPdN+dKRcmfg== 2026-03-09T15:26:23.638 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph-authtool: AQCf5q5pjhUJJhAAyvzJmPfx7IE4TWE2kND0IA== 2026-03-09T15:26:23.760 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph-authtool: AQCf5q5p1gFTLRAABrmy2yX/bbEsrvZFRmeo0Q== 2026-03-09T15:26:23.787 INFO:teuthology.orchestra.run.vm05.stderr:Creating initial monmap... 
2026-03-09T15:26:23.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-09T15:26:23.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/monmaptool: setting min_mon_release = octopus 2026-03-09T15:26:23.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: set fsid to 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:26:23.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/monmaptool: /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-09T15:26:23.934 INFO:teuthology.orchestra.run.vm05.stderr:monmaptool for a [v2:192.168.123.105:3300,v1:192.168.123.105:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-03-09T15:26:23.934 INFO:teuthology.orchestra.run.vm05.stderr:setting min_mon_release = octopus 2026-03-09T15:26:23.934 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/monmaptool: set fsid to 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:26:23.934 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-03-09T15:26:23.934 INFO:teuthology.orchestra.run.vm05.stderr: 2026-03-09T15:26:23.934 INFO:teuthology.orchestra.run.vm05.stderr:Creating mon... 2026-03-09T15:26:24.104 INFO:teuthology.orchestra.run.vm05.stderr:create mon.a on 2026-03-09T15:26:24.305 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target. 2026-03-09T15:26:24.490 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea.target → /etc/systemd/system/ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea.target. 2026-03-09T15:26:24.490 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: Created symlink /etc/systemd/system/ceph.target.wants/ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea.target → /etc/systemd/system/ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea.target. 2026-03-09T15:26:24.853 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: Failed to reset failed state of unit ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.a.service: Unit ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.a.service not loaded. 2026-03-09T15:26:24.867 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: Created symlink /etc/systemd/system/ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea.target.wants/ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.a.service → /etc/systemd/system/ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@.service. 2026-03-09T15:26:25.278 INFO:teuthology.orchestra.run.vm05.stderr:firewalld does not appear to be present 2026-03-09T15:26:25.278 INFO:teuthology.orchestra.run.vm05.stderr:Not possible to enable service . firewalld.service is not available 2026-03-09T15:26:25.278 INFO:teuthology.orchestra.run.vm05.stderr:Waiting for mon to start... 2026-03-09T15:26:25.278 INFO:teuthology.orchestra.run.vm05.stderr:Waiting for mon... 
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:   cluster:
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:     id:     452f6a00-1bcc-11f1-a1ee-7f1a2af01dea
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:     health: HEALTH_OK
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:   services:
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:     mon: 1 daemons, quorum a (age 0.190121s)
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:     mgr: no daemons active
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:     osd: 0 osds: 0 up, 0 in
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:   data:
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:     pools:   0 pools, 0 pgs
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:     objects: 0 objects, 0 B
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:     usage:   0 B used, 0 B / 0 B avail
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:     pgs:
2026-03-09T15:26:25.546 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:
2026-03-09T15:26:25.580 INFO:teuthology.orchestra.run.vm05.stderr:mon is available
2026-03-09T15:26:25.580 INFO:teuthology.orchestra.run.vm05.stderr:Assimilating anything we can from ceph.conf...
2026-03-09T15:26:25.819 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:
2026-03-09T15:26:25.819 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: [global]
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: fsid = 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: mon_host = [v2:192.168.123.105:3300,v1:192.168.123.105:6789]
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: [mgr]
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: mgr/cephadm/use_agent = False
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: mgr/telemetry/nag = false
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph:
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: [osd]
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: osd_map_max_advance = 10
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000
2026-03-09T15:26:25.820 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: osd_sloppy_crc = true
2026-03-09T15:26:25.865 INFO:teuthology.orchestra.run.vm05.stderr:Generating new minimal ceph.conf...
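The assimilation step above folds the seed ceph.conf into the cluster's central config database and then regenerates a minimal /etc/ceph/ceph.conf. Outside of bootstrap the equivalent manual steps would be roughly the following sketch, assuming an admin keyring is already in place:

    # import whatever options the mon accepts from a conf file into the config db
    sudo ./cephadm shell -- ceph config assimilate-conf -i /etc/ceph/ceph.conf
    # write back a minimal conf (essentially fsid and mon_host)
    sudo ./cephadm shell -- ceph config generate-minimal-conf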
2026-03-09T15:26:26.104 INFO:teuthology.orchestra.run.vm05.stderr:Restarting the monitor... 2026-03-09T15:26:26.187 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a[49411]: 2026-03-09T15:26:26.170+0000 7eff8e2bb700 -1 mon.a@0(leader) e1 *** Got Signal Terminated *** 2026-03-09T15:26:26.439 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 podman[49666]: 2026-03-09 15:26:26.203276029 +0000 UTC m=+0.050503969 container died d4d8c362144811ce7ff8e154aa6d8db9d2eee4ca272717fedd57edcda1021e4e (image=quay.io/ceph/ceph:v17.2.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a, CEPH_POINT_RELEASE=-17.2.0, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, version=8, io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, GIT_BRANCH=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_CLEAN=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, RELEASE=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, name=centos-stream, build-date=2022-05-03T08:36:31.336870, architecture=x86_64, com.redhat.component=centos-stream-container, io.buildah.version=1.19.8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , io.openshift.expose-services=, release=754, vcs-type=git, vendor=Red Hat, Inc., ceph=True, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb) 2026-03-09T15:26:26.439 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 podman[49666]: 2026-03-09 15:26:26.243167222 +0000 UTC m=+0.090395162 container remove d4d8c362144811ce7ff8e154aa6d8db9d2eee4ca272717fedd57edcda1021e4e (image=quay.io/ceph/ceph:v17.2.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.buildah.version=1.19.8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, version=8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., GIT_BRANCH=HEAD, ceph=True, vendor=Red Hat, Inc., com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.openshift.expose-services=, CEPH_POINT_RELEASE=-17.2.0, GIT_CLEAN=True, vcs-type=git, RELEASE=HEAD, distribution-scope=public, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.openshift.tags=base centos centos-stream, name=centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, maintainer=Guillaume Abrioux , build-date=2022-05-03T08:36:31.336870) 2026-03-09T15:26:26.439 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 bash[49666]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a 2026-03-09T15:26:26.439 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 bash[49690]: Error: no container with name or ID "ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon.a" found: no such container 2026-03-09T15:26:26.439 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.a.service: Deactivated successfully. 2026-03-09T15:26:26.439 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 systemd[1]: Stopped Ceph mon.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:26:26.439 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 systemd[1]: Starting Ceph mon.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:26:26.495 INFO:teuthology.orchestra.run.vm05.stderr:Setting mon public_network to 192.168.123.0/24 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 podman[49741]: 2026-03-09 15:26:26.43896982 +0000 UTC m=+0.027791683 container create b31ac3c6697605a46f6e48fe77724f38c3a8fc53ea7c69605ffc8be894abd5a2 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.19.8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.expose-services=, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.tags=base centos centos-stream, com.redhat.component=centos-stream-container, GIT_BRANCH=HEAD, distribution-scope=public, release=754, version=8, GIT_CLEAN=True, build-date=2022-05-03T08:36:31.336870, io.k8s.display-name=CentOS Stream 8, name=centos-stream, ceph=True, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_REPO=https://github.com/ceph/ceph-container.git, RELEASE=HEAD, architecture=x86_64, vcs-type=git, CEPH_POINT_RELEASE=-17.2.0, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vendor=Red Hat, Inc., maintainer=Guillaume Abrioux , io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac) 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 podman[49741]: 2026-03-09 15:26:26.479359035 +0000 UTC m=+0.068180898 container init b31ac3c6697605a46f6e48fe77724f38c3a8fc53ea7c69605ffc8be894abd5a2 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a, RELEASE=HEAD, CEPH_POINT_RELEASE=-17.2.0, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_BRANCH=HEAD, distribution-scope=public, GIT_CLEAN=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, vendor=Red Hat, Inc., io.buildah.version=1.19.8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, ceph=True, release=754, io.k8s.display-name=CentOS Stream 8, name=centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, maintainer=Guillaume Abrioux , io.openshift.expose-services=, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, build-date=2022-05-03T08:36:31.336870, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, architecture=x86_64, vcs-type=git, GIT_REPO=https://github.com/ceph/ceph-container.git, io.openshift.tags=base centos centos-stream, com.redhat.component=centos-stream-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.) 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 podman[49741]: 2026-03-09 15:26:26.482183014 +0000 UTC m=+0.071004877 container start b31ac3c6697605a46f6e48fe77724f38c3a8fc53ea7c69605ffc8be894abd5a2 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a, architecture=x86_64, CEPH_POINT_RELEASE=-17.2.0, vendor=Red Hat, Inc., distribution-scope=public, release=754, version=8, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., RELEASE=HEAD, GIT_BRANCH=HEAD, ceph=True, io.openshift.expose-services=, build-date=2022-05-03T08:36:31.336870, vcs-type=git, maintainer=Guillaume Abrioux , io.buildah.version=1.19.8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_CLEAN=True, io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, name=centos-stream, com.redhat.component=centos-stream-container, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac) 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 bash[49741]: b31ac3c6697605a46f6e48fe77724f38c3a8fc53ea7c69605ffc8be894abd5a2 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 podman[49741]: 2026-03-09 15:26:26.424468831 +0000 UTC m=+0.013290705 image pull e1d6a67b021eb077ee22bf650f1a9fb1980a2cf5c36bdb9cba9eac6de8f702d9 quay.io/ceph/ceph:v17.2.0 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 systemd[1]: Started Ceph mon.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: set uid:gid to 167:167 (ceph:ceph) 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable), process ceph-mon, pid 2 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: pidfile_write: ignore empty --pid-file 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: load: jerasure load: lrc 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: RocksDB version: 6.15.5 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Git sha rocksdb_build_git_sha:@0@ 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Compile date Apr 18 2022 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: DB SUMMARY 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: DB Session ID: MFEZMNBZEWK3HHHD0TGB 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: CURRENT file: CURRENT 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: IDENTITY file: IDENTITY 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: MANIFEST file: MANIFEST-000009 size: 131 Bytes 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 1, files: 000008.sst 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000010.log size: 73743 ; 2026-03-09T15:26:26.710 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.error_if_exists: 0 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.create_if_missing: 0 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.paranoid_checks: 1 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.env: 0x55bdfa8c6860 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.fs: Posix File System 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.info_log: 0x55bdfbfdbdc0 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_file_opening_threads: 16 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.statistics: (nil) 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.use_fsync: 0 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_log_file_size: 0 2026-03-09T15:26:26.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.keep_log_file_num: 1000 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.recycle_log_file_num: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.allow_fallocate: 1 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.allow_mmap_reads: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.allow_mmap_writes: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.use_direct_reads: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.create_missing_column_families: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.db_log_dir: 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.wal_dir: /var/lib/ceph/mon/ceph-a/store.db 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.WAL_ttl_seconds: 0 
2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.advise_random_on_open: 1 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.db_write_buffer_size: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.write_buffer_manager: 0x55bdfc0cc240 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.new_table_reader_for_compaction_inputs: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.rate_limiter: (nil) 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.wal_recovery_mode: 2 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.enable_thread_tracking: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.enable_pipelined_write: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.unordered_write: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.row_cache: None 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.wal_filter: None 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.avoid_flush_during_recovery: 0 
2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.allow_ingest_behind: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.preserve_deletes: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.two_write_queues: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.manual_wal_flush: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.atomic_flush: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.log_readahead_size: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.best_efforts_recovery: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.allow_data_in_errors: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.db_host_id: __hostname__ 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_background_jobs: 2 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_background_compactions: -1 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_subcompactions: 1 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_total_wal_size: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-09T15:26:26.711 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_open_files: -1 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.bytes_per_sync: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-09T15:26:26.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compaction_readahead_size: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_background_flushes: -1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Compression algorithms supported: 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: kZSTD supported: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: kXpressCompression supported: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: kLZ4HCCompression supported: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: kLZ4Compression supported: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: kBZip2Compression supported: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: kZlibCompression supported: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: kSnappyCompression supported: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: [db/version_set.cc:4725] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000009 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: [db/column_family.cc:597] --------------- Options for column family [default]: 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.merge_operator: 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compaction_filter: None 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compaction_filter_factory: None 2026-03-09T15:26:26.712 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.sst_partitioner_factory: None 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55bdfbfa7d00) 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: cache_index_and_filter_blocks: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: pin_top_level_index_and_filter: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: index_type: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: data_block_index_type: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: index_shortening: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: hash_index_allow_collision: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: checksum: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: no_block_cache: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: block_cache: 0x55bdfc012170 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: block_cache_name: BinnedLRUCache 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: block_cache_options: 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: capacity : 536870912 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: num_shard_bits : 4 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: strict_capacity_limit : 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: high_pri_pool_ratio: 0.000 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: block_cache_compressed: (nil) 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: persistent_cache: (nil) 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: block_size: 4096 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: block_size_deviation: 10 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: block_restart_interval: 16 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: index_block_restart_interval: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: metadata_block_size: 4096 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: partition_filters: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: use_delta_encoding: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: filter_policy: rocksdb.BuiltinBloomFilter 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: whole_key_filtering: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: verify_compression: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: read_amp_bytes_per_bit: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: format_version: 4 
2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: enable_index_compression: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout: block_align: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.write_buffer_size: 33554432 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_write_buffer_number: 2 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compression: NoCompression 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.bottommost_compression: Disabled 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.prefix_extractor: nullptr 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.num_levels: 7 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-09T15:26:26.712 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compression_opts.level: 32767 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compression_opts.strategy: 0 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: 
Options.compression_opts.zstd_max_train_bytes: 0 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compression_opts.enabled: false 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.target_file_size_base: 67108864 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.arena_block_size: 4194304 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-09T15:26:26.713 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.rate_limit_delay_max_milliseconds: 100 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.disable_auto_compactions: 0 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-09T15:26:26.713 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.table_properties_collectors: 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.inplace_update_support: 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.bloom_locality: 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.max_successive_merges: 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.paranoid_file_checks: 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: 
Options.force_consistency_checks: 1 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.report_bg_io_stats: 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.ttl: 2592000 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.enable_blob_files: false 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.min_blob_size: 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.blob_file_size: 268435456 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: [db/version_set.cc:4773] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 11, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: [db/version_set.cc:4782] Column family [default] (ID 0), log number is 5 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: [db/version_set.cc:4083] Creating manifest 13 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773069986519255, "job": 1, "event": "recovery_started", "wal_files": [10]} 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: [db/db_impl/db_impl_open.cc:847] Recovering log #10 mode 2 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: [table/block_based/filter_policy.cc:996] Using legacy Bloom filter with high (20) bits/key. Dramatic filter space and/or accuracy improvement is available with format_version>=5. 
2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773069986520583, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 14, "file_size": 70715, "file_checksum": "", "file_checksum_func_name": "Unknown", "table_properties": {"data_size": 69032, "index_size": 176, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 581, "raw_key_size": 9687, "raw_average_key_size": 49, "raw_value_size": 63601, "raw_average_value_size": 324, "num_data_blocks": 8, "num_entries": 196, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "rocksdb.BuiltinBloomFilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; ", "creation_time": 1773069986, "oldest_key_time": 0, "file_creation_time": 0, "db_id": "7f44600a-9590-4b00-b146-56f8708c2222", "db_session_id": "MFEZMNBZEWK3HHHD0TGB"}} 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: [db/version_set.cc:4083] Creating manifest 15 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773069986521462, "job": 1, "event": "recovery_finished"} 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: [file/delete_scheduler.cc:73] Deleted file /var/lib/ceph/mon/ceph-a/store.db/000010.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: [db/db_impl/db_impl_open.cc:1701] SstFileManager instance 0x55bdfbff8700 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: DB pointer 0x55bdfc06c000 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: [db/db_impl/db_impl.cc:902] ------- DUMPING STATS ------- 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: rocksdb: [db/db_impl/db_impl.cc:903] 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: ** DB Stats ** 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 MB, 0.00 MB/s 2026-03-09T15:26:26.714 
INFO:journalctl@ceph.mon.a.vm05.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: ** Compaction Stats [default] ** 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: L0 2/0 70.81 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 59.1 0.00 0.00 1 0.001 0 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Sum 2/0 70.81 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 59.1 0.00 0.00 1 0.001 0 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 59.1 0.00 0.00 1 0.001 0 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: ** Compaction Stats [default] ** 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 59.1 0.00 0.00 1 0.001 0 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Cumulative compaction: 0.00 GB write, 6.94 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Interval compaction: 0.00 GB write, 6.94 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-09T15:26:26.714 INFO:journalctl@ceph.mon.a.vm05.stdout: 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: ** Compaction Stats [default] ** 
2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: L0 2/0 70.81 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 59.1 0.00 0.00 1 0.001 0 0 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: Sum 2/0 70.81 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 59.1 0.00 0.00 1 0.001 0 0 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: ** Compaction Stats [default] ** 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 59.1 0.00 0.00 1 0.001 0 0 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: Cumulative compaction: 0.00 GB write, 6.92 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: mon.a is new leader, mons a in quorum (ranks 0) 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: monmap e1: 1 mons at {a=[v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0]} 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 
ceph-mon[49764]: fsmap 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: osdmap e1: 0 total, 0 up, 0 in 2026-03-09T15:26:26.715 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:26 vm05 ceph-mon[49764]: mgrmap e1: no daemons active 2026-03-09T15:26:26.770 INFO:teuthology.orchestra.run.vm05.stderr:Wrote config to /etc/ceph/ceph.conf 2026-03-09T15:26:26.770 INFO:teuthology.orchestra.run.vm05.stderr:Wrote keyring to /etc/ceph/ceph.client.admin.keyring 2026-03-09T15:26:26.770 INFO:teuthology.orchestra.run.vm05.stderr:Creating mgr... 2026-03-09T15:26:26.771 INFO:teuthology.orchestra.run.vm05.stderr:Verifying port 9283 ... 2026-03-09T15:26:26.968 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: Failed to reset failed state of unit ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.y.service: Unit ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.y.service not loaded. 2026-03-09T15:26:26.985 INFO:teuthology.orchestra.run.vm05.stderr:systemctl: Created symlink /etc/systemd/system/ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea.target.wants/ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.y.service → /etc/systemd/system/ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@.service. 2026-03-09T15:26:27.320 INFO:teuthology.orchestra.run.vm05.stderr:firewalld does not appear to be present 2026-03-09T15:26:27.320 INFO:teuthology.orchestra.run.vm05.stderr:Not possible to enable service . firewalld.service is not available 2026-03-09T15:26:27.320 INFO:teuthology.orchestra.run.vm05.stderr:firewalld does not appear to be present 2026-03-09T15:26:27.320 INFO:teuthology.orchestra.run.vm05.stderr:Not possible to open ports <[9283]>. firewalld.service is not available 2026-03-09T15:26:27.320 INFO:teuthology.orchestra.run.vm05.stderr:Waiting for mgr to start... 2026-03-09T15:26:27.320 INFO:teuthology.orchestra.run.vm05.stderr:Waiting for mgr... 
2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: { 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "fsid": "452f6a00-1bcc-11f1-a1ee-7f1a2af01dea", 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "health": { 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "checks": {}, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "mutes": [] 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "quorum": [ 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: 0 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: ], 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "a" 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: ], 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "quorum_age": 1, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "monmap": { 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "osdmap": { 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "pgmap": { 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-09T15:26:27.592 
INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "fsmap": { 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "available": false, 2026-03-09T15:26:27.592 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "modules": [ 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "iostat", 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "nfs", 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "restful" 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: ], 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "services": {} 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "servicemap": { 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "modified": "2026-03-09T15:26:25.358157+0000", 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "services": {} 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-09T15:26:27.593 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: } 2026-03-09T15:26:27.617 INFO:teuthology.orchestra.run.vm05.stderr:mgr not available, waiting (1/15)... 2026-03-09T15:26:27.981 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:27 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1624082814' entity='client.admin' 2026-03-09T15:26:27.981 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:27 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/510737578' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T15:26:29.907 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: { 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "fsid": "452f6a00-1bcc-11f1-a1ee-7f1a2af01dea", 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "health": { 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "checks": {}, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "mutes": [] 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "quorum": [ 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: 0 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: ], 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "a" 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: ], 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "quorum_age": 3, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "monmap": { 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "osdmap": { 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "pgmap": { 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "data_bytes": 0, 
2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "fsmap": { 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "available": false, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "modules": [ 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "iostat", 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "nfs", 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "restful" 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: ], 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "services": {} 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:29.908 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "servicemap": { 2026-03-09T15:26:29.909 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:29.909 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "modified": "2026-03-09T15:26:25.358157+0000", 2026-03-09T15:26:29.909 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "services": {} 2026-03-09T15:26:29.909 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:29.909 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-09T15:26:29.909 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: } 2026-03-09T15:26:29.940 INFO:teuthology.orchestra.run.vm05.stderr:mgr not available, waiting (2/15)... 2026-03-09T15:26:30.031 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:29 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/1268292403' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T15:26:32.205 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: { 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "fsid": "452f6a00-1bcc-11f1-a1ee-7f1a2af01dea", 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "health": { 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "checks": {}, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "mutes": [] 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "quorum": [ 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: 0 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: ], 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "a" 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: ], 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "quorum_age": 5, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "monmap": { 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "osdmap": { 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "pgmap": { 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "data_bytes": 0, 
2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "fsmap": { 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "available": false, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "modules": [ 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "iostat", 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "nfs", 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "restful" 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: ], 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "services": {} 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "servicemap": { 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "modified": "2026-03-09T15:26:25.358157+0000", 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "services": {} 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:32.206 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-09T15:26:32.207 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: } 2026-03-09T15:26:32.282 INFO:teuthology.orchestra.run.vm05.stderr:mgr not available, waiting (3/15)... 2026-03-09T15:26:32.576 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:32 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/1884064807' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T15:26:33.684 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:33 vm05 ceph-mon[49764]: Activating manager daemon y 2026-03-09T15:26:33.684 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:33 vm05 ceph-mon[49764]: mgrmap e2: y(active, starting, since 0.00405586s) 2026-03-09T15:26:33.684 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:33 vm05 ceph-mon[49764]: from='mgr.14100 192.168.123.105:0/3627591709' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:26:33.684 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:33 vm05 ceph-mon[49764]: from='mgr.14100 192.168.123.105:0/3627591709' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:26:33.684 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:33 vm05 ceph-mon[49764]: from='mgr.14100 192.168.123.105:0/3627591709' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:26:33.684 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:33 vm05 ceph-mon[49764]: from='mgr.14100 192.168.123.105:0/3627591709' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:26:33.684 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:33 vm05 ceph-mon[49764]: from='mgr.14100 192.168.123.105:0/3627591709' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:26:33.684 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:33 vm05 ceph-mon[49764]: Manager daemon y is now available 2026-03-09T15:26:33.684 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:33 vm05 ceph-mon[49764]: from='mgr.14100 192.168.123.105:0/3627591709' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:26:33.684 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:33 vm05 ceph-mon[49764]: from='mgr.14100 192.168.123.105:0/3627591709' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:26:33.684 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:33 vm05 ceph-mon[49764]: from='mgr.14100 192.168.123.105:0/3627591709' entity='mgr.y' 2026-03-09T15:26:33.684 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:33 vm05 ceph-mon[49764]: from='mgr.14100 192.168.123.105:0/3627591709' entity='mgr.y' 2026-03-09T15:26:33.684 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:33 vm05 ceph-mon[49764]: from='mgr.14100 192.168.123.105:0/3627591709' entity='mgr.y' 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: { 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "fsid": "452f6a00-1bcc-11f1-a1ee-7f1a2af01dea", 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "health": { 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "status": "HEALTH_OK", 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "checks": {}, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "mutes": [] 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "election_epoch": 5, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "quorum": 
[ 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: 0 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: ], 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "quorum_names": [ 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "a" 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: ], 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "quorum_age": 8, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "monmap": { 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "min_mon_release_name": "quincy", 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_mons": 1 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "osdmap": { 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_osds": 0, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_up_osds": 0, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "osd_up_since": 0, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_in_osds": 0, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "osd_in_since": 0, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_remapped_pgs": 0 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "pgmap": { 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "pgs_by_state": [], 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_pgs": 0, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_pools": 0, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_objects": 0, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "data_bytes": 0, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "bytes_used": 0, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "bytes_avail": 0, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "bytes_total": 0 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "fsmap": { 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "by_rank": [], 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "up:standby": 0 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:34.558 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "mgrmap": { 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "available": true, 2026-03-09T15:26:34.559 
INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_standbys": 0, 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "modules": [ 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "iostat", 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "nfs", 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "restful" 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: ], 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "services": {} 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "servicemap": { 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 1, 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "modified": "2026-03-09T15:26:25.358157+0000", 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "services": {} 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: }, 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "progress_events": {} 2026-03-09T15:26:34.559 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: } 2026-03-09T15:26:34.586 INFO:teuthology.orchestra.run.vm05.stderr:mgr is available 2026-03-09T15:26:34.852 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: [global] 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: fsid = 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: mon_osd_allow_pg_remap = true 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: mon_osd_allow_primary_affinity = true 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: mon_warn_on_no_sortbitwise = false 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: osd_crush_chooseleaf_type = 0 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: [mgr] 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: mgr/telemetry/nag = false 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: [osd] 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: osd_map_max_advance = 10 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: osd_mclock_iops_capacity_threshold_hdd = 49000 2026-03-09T15:26:34.853 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: osd_sloppy_crc = true 2026-03-09T15:26:34.885 INFO:teuthology.orchestra.run.vm05.stderr:Enabling cephadm module... 2026-03-09T15:26:34.950 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:34 vm05 ceph-mon[49764]: mgrmap e3: y(active, since 1.00859s) 2026-03-09T15:26:34.950 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:34 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/3241739993' entity='client.admin' cmd=[{"prefix": "status", "format": "json-pretty"}]: dispatch 2026-03-09T15:26:35.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:35 vm05 ceph-mon[49764]: mgrmap e4: y(active, since 2s) 2026-03-09T15:26:35.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:35 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1157990803' entity='client.admin' cmd=[{"prefix": "config assimilate-conf"}]: dispatch 2026-03-09T15:26:35.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:35 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1157990803' entity='client.admin' cmd='[{"prefix": "config assimilate-conf"}]': finished 2026-03-09T15:26:35.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:35 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1434382697' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "cephadm"}]: dispatch 2026-03-09T15:26:36.263 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: { 2026-03-09T15:26:36.263 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 5, 2026-03-09T15:26:36.263 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "available": true, 2026-03-09T15:26:36.263 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "active_name": "y", 2026-03-09T15:26:36.263 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-09T15:26:36.263 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: } 2026-03-09T15:26:36.302 INFO:teuthology.orchestra.run.vm05.stderr:Waiting for the mgr to restart... 2026-03-09T15:26:36.302 INFO:teuthology.orchestra.run.vm05.stderr:Waiting for mgr epoch 5... 2026-03-09T15:26:37.001 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:36 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1434382697' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-03-09T15:26:37.001 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:36 vm05 ceph-mon[49764]: mgrmap e5: y(active, since 3s) 2026-03-09T15:26:37.001 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:36 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/390125039' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-09T15:26:40.780 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:40 vm05 ceph-mon[49764]: Active manager daemon y restarted 2026-03-09T15:26:40.780 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:40 vm05 ceph-mon[49764]: Activating manager daemon y 2026-03-09T15:26:40.780 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:40 vm05 ceph-mon[49764]: osdmap e2: 0 total, 0 up, 0 in 2026-03-09T15:26:41.609 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: { 2026-03-09T15:26:41.609 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "mgrmap_epoch": 7, 2026-03-09T15:26:41.609 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "initialized": true 2026-03-09T15:26:41.609 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: } 2026-03-09T15:26:41.636 INFO:teuthology.orchestra.run.vm05.stderr:mgr epoch 5 is available 2026-03-09T15:26:41.636 INFO:teuthology.orchestra.run.vm05.stderr:Setting orchestrator backend to cephadm... 
2026-03-09T15:26:41.847 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: mgrmap e6: y(active, starting, since 0.0543657s) 2026-03-09T15:26:41.847 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:26:41.847 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:26:41.847 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:26:41.847 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:26:41.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:26:41.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: Manager daemon y is now available 2026-03-09T15:26:41.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' 2026-03-09T15:26:41.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' 2026-03-09T15:26:41.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:26:41.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:26:41.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:26:41.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:26:41.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:26:41.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' 2026-03-09T15:26:41.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:41 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:26:42.216 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: value unchanged 2026-03-09T15:26:42.253 INFO:teuthology.orchestra.run.vm05.stderr:Generating ssh key... 
2026-03-09T15:26:42.854 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:42 vm05 ceph-mon[49764]: [09/Mar/2026:15:26:41] ENGINE Bus STARTING 2026-03-09T15:26:42.854 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:42 vm05 ceph-mon[49764]: [09/Mar/2026:15:26:41] ENGINE Serving on https://192.168.123.105:7150 2026-03-09T15:26:42.854 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:42 vm05 ceph-mon[49764]: [09/Mar/2026:15:26:41] ENGINE Bus STARTED 2026-03-09T15:26:42.854 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:42 vm05 ceph-mon[49764]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch 2026-03-09T15:26:42.854 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:42 vm05 ceph-mon[49764]: mgrmap e7: y(active, since 1.06545s) 2026-03-09T15:26:42.854 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:42 vm05 ceph-mon[49764]: from='client.14124 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch 2026-03-09T15:26:42.854 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:42 vm05 ceph-mon[49764]: from='client.14132 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:26:42.854 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:42 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' 2026-03-09T15:26:42.854 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:42 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:26:43.002 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCjjoKisWWb14WDl+sAjjb04l9fRa8vIbWvuHS16m8dyeFc33fg0Lga0zT1DKtVorOWjc8hWS/xNSUV31nhdQgWGq/Xg3sJz4wBfN+B55BpIIeiznEnBRaO/5T2d+cck9UwNgdv/5QoNBueiiOKooku4Wuy+D5iIeaFHB9QTENB+p16oAN0QaIwqWFbOvpnE4TC9lktoo/HxdKwiumbah5xYq8FIz/ox9BwrUmK2i9iFajfs5pN7GQTRG/rRCxw2Ws2kVvG0tM0AcHf9NKt8kLpvx5PiNIpZBoT19f3RPbfYbxZky0vvKmIq9rODu7Xf85WCeYKLu3Fz6HtN1PzROSbqgMKp5iGACoDfdlh76PYWu7Q+WrFKV6mCTs97cNfDbiMulb+QNa9s09xWkeNd28joXIIcEnGWTpYoBUri0ut1QfjrriUINAbUd6NJJzMQKd6CYVcbHy7Nme7aofdLKn/tG3vhpe5nd5V1gPPmF+g8QEPJturWIVFrlck476k2qc= ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:26:43.028 INFO:teuthology.orchestra.run.vm05.stderr:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub 2026-03-09T15:26:43.029 INFO:teuthology.orchestra.run.vm05.stderr:Adding key to root@localhost authorized_keys... 2026-03-09T15:26:43.029 INFO:teuthology.orchestra.run.vm05.stderr:Adding host vm05... 2026-03-09T15:26:43.862 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:43 vm05 ceph-mon[49764]: from='client.14134 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:26:43.862 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:43 vm05 ceph-mon[49764]: from='client.14136 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:26:43.862 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:43 vm05 ceph-mon[49764]: Generating ssh key... 
2026-03-09T15:26:43.862 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:43 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' 2026-03-09T15:26:43.862 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:43 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' 2026-03-09T15:26:43.862 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:43 vm05 ceph-mon[49764]: mgrmap e8: y(active, since 2s) 2026-03-09T15:26:44.001 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: Added host 'vm05' with addr '192.168.123.105' 2026-03-09T15:26:44.055 INFO:teuthology.orchestra.run.vm05.stderr:Deploying unmanaged mon service... 2026-03-09T15:26:44.439 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: Scheduled mon update... 2026-03-09T15:26:44.498 INFO:teuthology.orchestra.run.vm05.stderr:Deploying unmanaged mgr service... 2026-03-09T15:26:44.745 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: Scheduled mgr update... 2026-03-09T15:26:44.870 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:44 vm05 ceph-mon[49764]: from='client.14138 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:26:44.870 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:44 vm05 ceph-mon[49764]: from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm05", "addr": "192.168.123.105", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:26:44.870 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:44 vm05 ceph-mon[49764]: Deploying cephadm binary to vm05 2026-03-09T15:26:44.870 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:44 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' 2026-03-09T15:26:44.870 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:44 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:26:44.870 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:44 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' 2026-03-09T15:26:45.364 INFO:teuthology.orchestra.run.vm05.stderr:Enabling the dashboard module... 2026-03-09T15:26:45.875 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:45 vm05 ceph-mon[49764]: Added host vm05 2026-03-09T15:26:45.875 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:45 vm05 ceph-mon[49764]: from='client.14142 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:26:45.875 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:45 vm05 ceph-mon[49764]: Saving service mon spec with placement count:5 2026-03-09T15:26:45.875 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:45 vm05 ceph-mon[49764]: from='client.14144 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "unmanaged": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:26:45.875 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:45 vm05 ceph-mon[49764]: Saving service mgr spec with placement count:2 2026-03-09T15:26:45.875 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:45 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' 2026-03-09T15:26:45.875 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:45 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/1837934756' entity='client.admin' 2026-03-09T15:26:45.875 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:45 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1015661375' entity='client.admin' 2026-03-09T15:26:45.875 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:45 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/4272997092' entity='client.admin' cmd=[{"prefix": "mgr module enable", "module": "dashboard"}]: dispatch 2026-03-09T15:26:46.836 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: { 2026-03-09T15:26:46.836 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "epoch": 9, 2026-03-09T15:26:46.836 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "available": true, 2026-03-09T15:26:46.836 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "active_name": "y", 2026-03-09T15:26:46.836 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "num_standby": 0 2026-03-09T15:26:46.836 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: } 2026-03-09T15:26:46.880 INFO:teuthology.orchestra.run.vm05.stderr:Waiting for the mgr to restart... 2026-03-09T15:26:46.880 INFO:teuthology.orchestra.run.vm05.stderr:Waiting for mgr epoch 9... 2026-03-09T15:26:46.893 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:46.889+0000 7fb514778000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T15:26:46.893 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:46 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' 2026-03-09T15:26:46.893 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:46 vm05 ceph-mon[49764]: from='mgr.14120 192.168.123.105:0/2147886559' entity='mgr.y' 2026-03-09T15:26:46.893 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:46 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/4272997092' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished 2026-03-09T15:26:46.893 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:46 vm05 ceph-mon[49764]: mgrmap e9: y(active, since 5s) 2026-03-09T15:26:47.397 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:47 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:47.371+0000 7fb514778000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:26:47.902 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:47 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/1080788455' entity='client.admin' cmd=[{"prefix": "mgr stat"}]: dispatch 2026-03-09T15:26:47.904 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:47 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:47.752+0000 7fb514778000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T15:26:48.155 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:47 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:47.931+0000 7fb514778000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:26:48.155 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:47 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:47.996+0000 7fb514778000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:26:48.410 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:48 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:48.180+0000 7fb514778000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:26:48.915 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:48 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:48.814+0000 7fb514778000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:26:49.169 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:49 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:49.006+0000 7fb514778000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:26:49.169 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:49 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:49.067+0000 7fb514778000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:26:49.169 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:49 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:49.124+0000 7fb514778000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:26:49.485 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:49 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:49.191+0000 7fb514778000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:26:49.485 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:49 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:49.254+0000 7fb514778000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:26:49.790 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:49 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:49.599+0000 7fb514778000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:26:49.790 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:49 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:49.679+0000 7fb514778000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:26:50.293 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:50.289+0000 7fb514778000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:26:50.544 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:50.360+0000 7fb514778000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T15:26:50.544 
INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:50.433+0000 7fb514778000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:26:50.796 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:50.572+0000 7fb514778000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T15:26:50.796 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:50.641+0000 7fb514778000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:26:50.796 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:50.752+0000 7fb514778000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:26:51.050 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:50.853+0000 7fb514778000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:26:51.302 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:51 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:51.206+0000 7fb514778000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T15:26:51.302 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:51 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:26:51.280+0000 7fb514778000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:26:51.576 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:51 vm05 ceph-mon[49764]: Active manager daemon y restarted 2026-03-09T15:26:51.576 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:51 vm05 ceph-mon[49764]: Activating manager daemon y 2026-03-09T15:26:51.576 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:51 vm05 ceph-mon[49764]: osdmap e3: 0 total, 0 up, 0 in 2026-03-09T15:26:51.830 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:51 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: [09/Mar/2026:15:26:51] ENGINE Bus STARTING 2026-03-09T15:26:52.082 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:51 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: [09/Mar/2026:15:26:51] ENGINE Serving on https://192.168.123.105:7150 2026-03-09T15:26:52.082 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:26:51 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: [09/Mar/2026:15:26:51] ENGINE Bus STARTED 2026-03-09T15:26:52.367 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: { 2026-03-09T15:26:52.367 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "mgrmap_epoch": 11, 2026-03-09T15:26:52.367 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: "initialized": true 2026-03-09T15:26:52.367 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: } 2026-03-09T15:26:52.439 INFO:teuthology.orchestra.run.vm05.stderr:mgr epoch 9 is available 2026-03-09T15:26:52.439 INFO:teuthology.orchestra.run.vm05.stderr:Generating a dashboard self-signed certificate... 
2026-03-09T15:26:52.595 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:52 vm05 ceph-mon[49764]: mgrmap e10: y(active, starting, since 0.0541393s) 2026-03-09T15:26:52.595 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:26:52.595 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:26:52.595 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:26:52.595 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:26:52.595 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:26:52.595 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:52 vm05 ceph-mon[49764]: Manager daemon y is now available 2026-03-09T15:26:52.595 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:26:52.595 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:26:52.595 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:26:52.595 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:26:52.595 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:26:52.595 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:26:52.863 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: Self-signed certificate created 2026-03-09T15:26:52.923 INFO:teuthology.orchestra.run.vm05.stderr:Creating initial admin user... 2026-03-09T15:26:53.491 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: {"username": "admin", "password": "$2b$12$ZZaiIQGTGuC6wDsemvXYquZJgG2y/jKXHHUUCF1dQatKWVC.vAzDW", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1773070013, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true} 2026-03-09T15:26:53.528 INFO:teuthology.orchestra.run.vm05.stderr:Fetching dashboard port number... 
2026-03-09T15:26:53.605 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:53 vm05 ceph-mon[49764]: [09/Mar/2026:15:26:51] ENGINE Bus STARTING
2026-03-09T15:26:53.605 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:53 vm05 ceph-mon[49764]: [09/Mar/2026:15:26:51] ENGINE Serving on https://192.168.123.105:7150
2026-03-09T15:26:53.605 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:53 vm05 ceph-mon[49764]: [09/Mar/2026:15:26:51] ENGINE Bus STARTED
2026-03-09T15:26:53.605 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:53 vm05 ceph-mon[49764]: mgrmap e11: y(active, since 1.06587s)
2026-03-09T15:26:53.605 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:53 vm05 ceph-mon[49764]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-03-09T15:26:53.605 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:53 vm05 ceph-mon[49764]: from='client.14156 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-03-09T15:26:53.605 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y'
2026-03-09T15:26:53.605 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y'
2026-03-09T15:26:53.605 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y'
2026-03-09T15:26:53.805 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: 8443
2026-03-09T15:26:53.836 INFO:teuthology.orchestra.run.vm05.stderr:firewalld does not appear to be present
2026-03-09T15:26:53.836 INFO:teuthology.orchestra.run.vm05.stderr:Not possible to open ports <[8443]>. firewalld.service is not available
2026-03-09T15:26:53.838 INFO:teuthology.orchestra.run.vm05.stderr:Ceph Dashboard is now available at:
2026-03-09T15:26:53.838 INFO:teuthology.orchestra.run.vm05.stderr:
2026-03-09T15:26:53.838 INFO:teuthology.orchestra.run.vm05.stderr: URL: https://vm05.local:8443/
2026-03-09T15:26:53.838 INFO:teuthology.orchestra.run.vm05.stderr: User: admin
2026-03-09T15:26:53.838 INFO:teuthology.orchestra.run.vm05.stderr: Password: ja354r3y35
2026-03-09T15:26:53.838 INFO:teuthology.orchestra.run.vm05.stderr:
2026-03-09T15:26:53.838 INFO:teuthology.orchestra.run.vm05.stderr:Enabling autotune for osd_memory_target
2026-03-09T15:26:54.533 INFO:teuthology.orchestra.run.vm05.stderr:/usr/bin/ceph: set mgr/dashboard/cluster/status
2026-03-09T15:26:54.589 INFO:teuthology.orchestra.run.vm05.stderr:You can access the Ceph CLI with:
2026-03-09T15:26:54.589 INFO:teuthology.orchestra.run.vm05.stderr:
2026-03-09T15:26:54.589 INFO:teuthology.orchestra.run.vm05.stderr: sudo /home/ubuntu/cephtest/cephadm shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
2026-03-09T15:26:54.589 INFO:teuthology.orchestra.run.vm05.stderr:
2026-03-09T15:26:54.589 INFO:teuthology.orchestra.run.vm05.stderr:Please consider enabling telemetry to help improve Ceph:
2026-03-09T15:26:54.589 INFO:teuthology.orchestra.run.vm05.stderr:
2026-03-09T15:26:54.589 INFO:teuthology.orchestra.run.vm05.stderr: ceph telemetry on
2026-03-09T15:26:54.589 INFO:teuthology.orchestra.run.vm05.stderr:
2026-03-09T15:26:54.589 INFO:teuthology.orchestra.run.vm05.stderr:For more information see:
2026-03-09T15:26:54.589 INFO:teuthology.orchestra.run.vm05.stderr:
2026-03-09T15:26:54.589 INFO:teuthology.orchestra.run.vm05.stderr: https://docs.ceph.com/docs/master/mgr/telemetry/
2026-03-09T15:26:54.589 INFO:teuthology.orchestra.run.vm05.stderr:
2026-03-09T15:26:54.589 INFO:teuthology.orchestra.run.vm05.stderr:Bootstrap complete.
2026-03-09T15:26:54.618 INFO:tasks.cephadm:Fetching config...
2026-03-09T15:26:54.619 DEBUG:teuthology.orchestra.run.vm05:> set -ex
2026-03-09T15:26:54.619 DEBUG:teuthology.orchestra.run.vm05:> dd if=/etc/ceph/ceph.conf of=/dev/stdout
2026-03-09T15:26:54.652 INFO:tasks.cephadm:Fetching client.admin keyring...
2026-03-09T15:26:54.652 DEBUG:teuthology.orchestra.run.vm05:> set -ex
2026-03-09T15:26:54.652 DEBUG:teuthology.orchestra.run.vm05:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout
2026-03-09T15:26:54.682 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:54 vm05 ceph-mon[49764]: from='client.14164 -' entity='client.admin' cmd=[{"prefix": "dashboard create-self-signed-cert", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:26:54.683 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:54 vm05 ceph-mon[49764]: from='client.14166 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:26:54.683 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:54 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y'
2026-03-09T15:26:54.683 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:54 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1934346008' entity='client.admin' cmd=[{"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"}]: dispatch
2026-03-09T15:26:54.683 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:54 vm05 ceph-mon[49764]: mgrmap e12: y(active, since 2s)
2026-03-09T15:26:54.711 INFO:tasks.cephadm:Fetching mon keyring...
2026-03-09T15:26:54.711 DEBUG:teuthology.orchestra.run.vm05:> set -ex
2026-03-09T15:26:54.711 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/keyring of=/dev/stdout
2026-03-09T15:26:54.810 INFO:tasks.cephadm:Fetching pub ssh key...
2026-03-09T15:26:54.810 DEBUG:teuthology.orchestra.run.vm05:> set -ex
2026-03-09T15:26:54.810 DEBUG:teuthology.orchestra.run.vm05:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout
2026-03-09T15:26:54.876 INFO:tasks.cephadm:Installing pub ssh key for root users...
2026-03-09T15:26:54.876 DEBUG:teuthology.orchestra.run.vm05:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCjjoKisWWb14WDl+sAjjb04l9fRa8vIbWvuHS16m8dyeFc33fg0Lga0zT1DKtVorOWjc8hWS/xNSUV31nhdQgWGq/Xg3sJz4wBfN+B55BpIIeiznEnBRaO/5T2d+cck9UwNgdv/5QoNBueiiOKooku4Wuy+D5iIeaFHB9QTENB+p16oAN0QaIwqWFbOvpnE4TC9lktoo/HxdKwiumbah5xYq8FIz/ox9BwrUmK2i9iFajfs5pN7GQTRG/rRCxw2Ws2kVvG0tM0AcHf9NKt8kLpvx5PiNIpZBoT19f3RPbfYbxZky0vvKmIq9rODu7Xf85WCeYKLu3Fz6HtN1PzROSbqgMKp5iGACoDfdlh76PYWu7Q+WrFKV6mCTs97cNfDbiMulb+QNa9s09xWkeNd28joXIIcEnGWTpYoBUri0ut1QfjrriUINAbUd6NJJzMQKd6CYVcbHy7Nme7aofdLKn/tG3vhpe5nd5V1gPPmF+g8QEPJturWIVFrlck476k2qc= ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-09T15:26:54.981 INFO:teuthology.orchestra.run.vm05.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCjjoKisWWb14WDl+sAjjb04l9fRa8vIbWvuHS16m8dyeFc33fg0Lga0zT1DKtVorOWjc8hWS/xNSUV31nhdQgWGq/Xg3sJz4wBfN+B55BpIIeiznEnBRaO/5T2d+cck9UwNgdv/5QoNBueiiOKooku4Wuy+D5iIeaFHB9QTENB+p16oAN0QaIwqWFbOvpnE4TC9lktoo/HxdKwiumbah5xYq8FIz/ox9BwrUmK2i9iFajfs5pN7GQTRG/rRCxw2Ws2kVvG0tM0AcHf9NKt8kLpvx5PiNIpZBoT19f3RPbfYbxZky0vvKmIq9rODu7Xf85WCeYKLu3Fz6HtN1PzROSbqgMKp5iGACoDfdlh76PYWu7Q+WrFKV6mCTs97cNfDbiMulb+QNa9s09xWkeNd28joXIIcEnGWTpYoBUri0ut1QfjrriUINAbUd6NJJzMQKd6CYVcbHy7Nme7aofdLKn/tG3vhpe5nd5V1gPPmF+g8QEPJturWIVFrlck476k2qc= ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:26:55.001 DEBUG:teuthology.orchestra.run.vm09:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCjjoKisWWb14WDl+sAjjb04l9fRa8vIbWvuHS16m8dyeFc33fg0Lga0zT1DKtVorOWjc8hWS/xNSUV31nhdQgWGq/Xg3sJz4wBfN+B55BpIIeiznEnBRaO/5T2d+cck9UwNgdv/5QoNBueiiOKooku4Wuy+D5iIeaFHB9QTENB+p16oAN0QaIwqWFbOvpnE4TC9lktoo/HxdKwiumbah5xYq8FIz/ox9BwrUmK2i9iFajfs5pN7GQTRG/rRCxw2Ws2kVvG0tM0AcHf9NKt8kLpvx5PiNIpZBoT19f3RPbfYbxZky0vvKmIq9rODu7Xf85WCeYKLu3Fz6HtN1PzROSbqgMKp5iGACoDfdlh76PYWu7Q+WrFKV6mCTs97cNfDbiMulb+QNa9s09xWkeNd28joXIIcEnGWTpYoBUri0ut1QfjrriUINAbUd6NJJzMQKd6CYVcbHy7Nme7aofdLKn/tG3vhpe5nd5V1gPPmF+g8QEPJturWIVFrlck476k2qc= ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys 2026-03-09T15:26:55.039 INFO:teuthology.orchestra.run.vm09.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCjjoKisWWb14WDl+sAjjb04l9fRa8vIbWvuHS16m8dyeFc33fg0Lga0zT1DKtVorOWjc8hWS/xNSUV31nhdQgWGq/Xg3sJz4wBfN+B55BpIIeiznEnBRaO/5T2d+cck9UwNgdv/5QoNBueiiOKooku4Wuy+D5iIeaFHB9QTENB+p16oAN0QaIwqWFbOvpnE4TC9lktoo/HxdKwiumbah5xYq8FIz/ox9BwrUmK2i9iFajfs5pN7GQTRG/rRCxw2Ws2kVvG0tM0AcHf9NKt8kLpvx5PiNIpZBoT19f3RPbfYbxZky0vvKmIq9rODu7Xf85WCeYKLu3Fz6HtN1PzROSbqgMKp5iGACoDfdlh76PYWu7Q+WrFKV6mCTs97cNfDbiMulb+QNa9s09xWkeNd28joXIIcEnGWTpYoBUri0ut1QfjrriUINAbUd6NJJzMQKd6CYVcbHy7Nme7aofdLKn/tG3vhpe5nd5V1gPPmF+g8QEPJturWIVFrlck476k2qc= ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:26:55.050 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-03-09T15:26:55.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:55 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/2133211771' entity='client.admin' 2026-03-09T15:26:55.828 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-03-09T15:26:55.828 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-03-09T15:26:56.399 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm09 2026-03-09T15:26:56.399 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T15:26:56.399 DEBUG:teuthology.orchestra.run.vm09:> dd of=/etc/ceph/ceph.conf 2026-03-09T15:26:56.416 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T15:26:56.416 DEBUG:teuthology.orchestra.run.vm09:> dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:26:56.472 INFO:tasks.cephadm:Adding host vm09 to orchestrator... 2026-03-09T15:26:56.472 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch host add vm09 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/1563650822' entity='client.admin' 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: from='client.14176 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:26:56.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:56 vm05 ceph-mon[49764]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:26:57.873 INFO:teuthology.orchestra.run.vm05.stdout:Added host 'vm09' with addr '192.168.123.109' 2026-03-09T15:26:57.933 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch host ls --format=json 2026-03-09T15:26:58.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:58 vm05 ceph-mon[49764]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:26:58.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:58 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:26:58.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:58 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:26:58.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:58 vm05 ceph-mon[49764]: from='client.14178 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm09", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:26:58.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:58 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:26:58.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:58 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:26:58.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:58 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:26:58.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:58 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' 
entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:26:58.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:58 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:26:58.463 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:26:58.463 INFO:teuthology.orchestra.run.vm05.stdout:[{"addr": "192.168.123.105", "hostname": "vm05", "labels": [], "status": ""}, {"addr": "192.168.123.109", "hostname": "vm09", "labels": [], "status": ""}] 2026-03-09T15:26:58.547 INFO:tasks.cephadm:Setting crush tunables to default 2026-03-09T15:26:58.547 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd crush tunables default 2026-03-09T15:26:59.302 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:59 vm05 ceph-mon[49764]: Deploying cephadm binary to vm09 2026-03-09T15:26:59.302 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:59 vm05 ceph-mon[49764]: Added host vm09 2026-03-09T15:26:59.302 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:59 vm05 ceph-mon[49764]: mgrmap e13: y(active, since 6s) 2026-03-09T15:26:59.302 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:59 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:26:59.302 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:59 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:26:59.302 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:26:59 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1856287508' entity='client.admin' cmd=[{"prefix": "osd crush tunables", "profile": "default"}]: dispatch 2026-03-09T15:26:59.718 INFO:teuthology.orchestra.run.vm05.stderr:adjusted tunables profile to default 2026-03-09T15:26:59.774 INFO:tasks.cephadm:Adding mon.a on vm05 2026-03-09T15:26:59.774 INFO:tasks.cephadm:Adding mon.c on vm05 2026-03-09T15:26:59.774 INFO:tasks.cephadm:Adding mon.b on vm09 2026-03-09T15:26:59.774 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch apply mon '3;vm05:192.168.123.105=a;vm05:[v2:192.168.123.105:3301,v1:192.168.123.105:6790]=c;vm09:192.168.123.109=b' 2026-03-09T15:27:00.288 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled mon update... 2026-03-09T15:27:00.313 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:00 vm05 ceph-mon[49764]: from='client.14180 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:27:00.314 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:00 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/1856287508' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished 2026-03-09T15:27:00.314 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:00 vm05 ceph-mon[49764]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T15:27:00.314 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:00 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:00.314 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:00 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:00.314 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:00 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:27:00.314 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:00 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:00.341 DEBUG:teuthology.orchestra.run.vm05:mon.c> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.c.service 2026-03-09T15:27:00.342 DEBUG:teuthology.orchestra.run.vm09:mon.b> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.b.service 2026-03-09T15:27:00.344 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 2026-03-09T15:27:00.345 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph mon dump -f json 2026-03-09T15:27:00.935 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T15:27:00.935 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea","modified":"2026-03-09T15:26:23.907274Z","created":"2026-03-09T15:26:23.907274Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:3300","nonce":0},{"type":"v1","addr":"192.168.123.105:6789","nonce":0}]},"addr":"192.168.123.105:6789/0","public_addr":"192.168.123.105:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-03-09T15:27:00.937 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-03-09T15:27:01.359 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:01 vm05 ceph-mon[49764]: from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "3;vm05:192.168.123.105=a;vm05:[v2:192.168.123.105:3301,v1:192.168.123.105:6790]=c;vm09:192.168.123.109=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:01.359 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:01 vm05 ceph-mon[49764]: Saving service mon spec with placement vm05:192.168.123.105=a;vm05:[v2:192.168.123.105:3301,v1:192.168.123.105:6790]=c;vm09:192.168.123.109=b;count:3 2026-03-09T15:27:01.359 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:01 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:01.359 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:01 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", 
"format": "json"}]: dispatch 2026-03-09T15:27:01.359 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:01 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:01.359 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:01 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:01.359 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:01 vm05 ceph-mon[49764]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:27:01.359 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:01 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:01.359 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:01 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:01.359 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:01 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:27:01.359 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:01 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:01.359 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:01 vm05 ceph-mon[49764]: from='client.? 192.168.123.109:0/3359683498' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T15:27:01.986 INFO:tasks.cephadm:Waiting for 3 mons in monmap... 2026-03-09T15:27:01.986 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph mon dump -f json 2026-03-09T15:27:03.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:03 vm09 ceph-mon[49358]: mon.b@-1(synchronizing) e2 handle_conf_change mon_allow_pool_delete,mon_cluster_log_to_file 2026-03-09T15:27:07.167 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: Deploying daemon mon.b on vm09 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: mon.a calling monitor election 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 
15:27:06 vm05 ceph-mon[49764]: mon.c calling monitor election 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: monmap e2: 2 mons at {a=[v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0],c=[v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0]} 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: fsmap 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: mgrmap e13: y(active, since 15s) 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: overall HEALTH_OK 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:07.168 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:06 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:27:12.463 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: mon.a calling monitor election 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: mon.c calling monitor election 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: mon.b calling monitor election 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: monmap e3: 3 mons at {a=[v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0],b=[v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0],c=[v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0]} 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: fsmap 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: mgrmap e13: y(active, since 21s) 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: overall HEALTH_OK 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:12.463 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:12 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 
ceph-mon[49358]: Deploying daemon mon.b on vm09 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: mon.a calling monitor election 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: mon.c calling monitor election 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: mon.a is new leader, mons a,c in quorum (ranks 0,1) 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: monmap e2: 2 mons at {a=[v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0],c=[v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0]} 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: fsmap 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: mgrmap e13: y(active, since 15s) 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: 
overall HEALTH_OK 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: mon.a calling monitor election 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: mon.c calling monitor election 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: mon.b calling monitor election 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:12.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T15:27:12.563 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: monmap e3: 3 mons at {a=[v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0],b=[v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0],c=[v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0]} 2026-03-09T15:27:12.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: fsmap 2026-03-09T15:27:12.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: osdmap e4: 0 total, 0 up, 0 in 2026-03-09T15:27:12.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: mgrmap e13: y(active, since 21s) 2026-03-09T15:27:12.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: overall HEALTH_OK 2026-03-09T15:27:12.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:12.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:12.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:12 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:12.919 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T15:27:12.919 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":3,"fsid":"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea","modified":"2026-03-09T15:27:07.289243Z","created":"2026-03-09T15:26:23.907274Z","min_mon_release":17,"min_mon_release_name":"quincy","election_strategy":1,"disallowed_leaders: ":"","stretch_mode":false,"tiebreaker_mon":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:3300","nonce":0},{"type":"v1","addr":"192.168.123.105:6789","nonce":0}]},"addr":"192.168.123.105:6789/0","public_addr":"192.168.123.105:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"c","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:3301","nonce":0},{"type":"v1","addr":"192.168.123.105:6790","nonce":0}]},"addr":"192.168.123.105:6790/0","public_addr":"192.168.123.105:6790/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":2,"name":"b","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:3300","nonce":0},{"type":"v1","addr":"192.168.123.109:6789","nonce":0}]},"addr":"192.168.123.109:6789/0","public_addr":"192.168.123.109:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1,2]} 2026-03-09T15:27:12.921 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 3 2026-03-09T15:27:12.968 INFO:tasks.cephadm:Generating final ceph.conf file... 
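The repeated "Waiting for 3 mons in monmap..." messages above come from the task re-running "ceph mon dump -f json" inside the cephadm shell until all three monitors appear in the dumped monmap. A minimal shell sketch of that polling loop, reusing the exact cephadm invocation from this log; the jq filter, retry count and sleep interval are illustrative assumptions, not the task's actual code:

    FSID=452f6a00-1bcc-11f1-a1ee-7f1a2af01dea
    IMAGE=quay.io/ceph/ceph:v17.2.0
    # Poll the monmap until it reports three mons (loop shape assumed)
    for attempt in $(seq 1 60); do
        mons=$(sudo /home/ubuntu/cephtest/cephadm --image "$IMAGE" shell \
                 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
                 --fsid "$FSID" -- ceph mon dump -f json | jq '.mons | length')
        [ "$mons" -ge 3 ] && break
        sleep 5
    done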
2026-03-09T15:27:12.969 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph config generate-minimal-conf 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='client.? 
192.168.123.109:0/2454279131' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:13.470 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:27:13.627 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:27:13.627 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:27:13.627 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:13.627 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:13.627 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:13.627 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:27:13.627 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:27:13.627 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:13.627 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:13.627 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:27:13.628 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:27:13.628 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:13.628 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='client.? 
192.168.123.109:0/2454279131' entity='client.admin' cmd=[{"prefix": "mon dump", "format": "json"}]: dispatch 2026-03-09T15:27:13.628 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:13.628 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:13.628 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:27:13.674 INFO:teuthology.orchestra.run.vm05.stdout:# minimal ceph.conf for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:27:13.674 INFO:teuthology.orchestra.run.vm05.stdout:[global] 2026-03-09T15:27:13.674 INFO:teuthology.orchestra.run.vm05.stdout: fsid = 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:27:13.674 INFO:teuthology.orchestra.run.vm05.stdout: mon_host = [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] 2026-03-09T15:27:13.756 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring... 2026-03-09T15:27:13.756 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-03-09T15:27:13.756 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/etc/ceph/ceph.conf 2026-03-09T15:27:13.795 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-03-09T15:27:13.795 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:27:13.874 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T15:27:13.874 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.conf 2026-03-09T15:27:13.897 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T15:27:13.897 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:27:13.963 INFO:tasks.cephadm:Adding mgr.y on vm05 2026-03-09T15:27:13.964 INFO:tasks.cephadm:Adding mgr.x on vm09 2026-03-09T15:27:13.964 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch apply mgr '2;vm05=y;vm09=x' 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: Reconfiguring mon.a (unknown last config time)... 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: Reconfiguring daemon mon.a on vm05 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: Reconfiguring mon.c (monmap changed)... 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: Reconfiguring daemon mon.c on vm05 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: Reconfiguring mon.b (monmap changed)... 
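The "Distributing (final) config and client.admin keyring..." step above captures the output of "ceph config generate-minimal-conf" and writes it to /etc/ceph/ceph.conf on every node through "sudo dd of=...", with the admin keyring pushed the same way. A rough equivalent, assuming plain ssh and the two hostnames from this run; teuthology actually feeds the data over its own remote-run plumbing rather than ssh:

    FSID=452f6a00-1bcc-11f1-a1ee-7f1a2af01dea
    IMAGE=quay.io/ceph/ceph:v17.2.0
    # Regenerate the minimal conf on the bootstrap host...
    conf=$(sudo /home/ubuntu/cephtest/cephadm --image "$IMAGE" shell \
             -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
             --fsid "$FSID" -- ceph config generate-minimal-conf)
    # ...and write it to /etc/ceph/ceph.conf on each test node (ssh is an
    # assumption here)
    for host in vm05 vm09; do
        printf '%s\n' "$conf" | ssh "$host" 'sudo dd of=/etc/ceph/ceph.conf'
    done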
2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: Reconfiguring daemon mon.b on vm09 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2255996464' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:14.448 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:14 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:14.448 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled mgr update... 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: Reconfiguring mon.a (unknown last config time)... 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: Reconfiguring daemon mon.a on vm05 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: Reconfiguring mon.c (monmap changed)... 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: Reconfiguring daemon mon.c on vm05 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: Reconfiguring mon.b (monmap changed)... 
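Both "orch apply" calls in this excerpt use cephadm's explicit placement syntax: a daemon count followed by semicolon-separated host[:address]=daemon-id entries, which the mgr records as "Saving service ... spec with placement ... count:N". The commands below simply restate the two invocations from the log in that shape, with the surrounding cephadm shell wrapper elided:

    # count=3; two mons pinned to vm05 (one on the non-default
    # v2:3301/v1:6790 address pair) and one to vm09
    ceph orch apply mon '3;vm05:192.168.123.105=a;vm05:[v2:192.168.123.105:3301,v1:192.168.123.105:6790]=c;vm09:192.168.123.109=b'
    # count=2; one mgr per host with fixed daemon ids
    ceph orch apply mgr '2;vm05=y;vm09=x'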
2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: Reconfiguring daemon mon.b on vm09 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2255996464' entity='client.admin' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:14.474 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:14 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:14.499 DEBUG:teuthology.orchestra.run.vm09:mgr.x> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.x.service 2026-03-09T15:27:14.501 INFO:tasks.cephadm:Deploying OSDs... 2026-03-09T15:27:14.501 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-03-09T15:27:14.501 DEBUG:teuthology.orchestra.run.vm05:> dd if=/scratch_devs of=/dev/stdout 2026-03-09T15:27:14.521 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T15:27:14.522 DEBUG:teuthology.orchestra.run.vm05:> ls /dev/[sv]d? 
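The "Deploying OSDs..." step that starts here first tries to read a /scratch_devs list (which fails with exit status 1), falls back to globbing /dev/[sv]d?, drops the root device, and then, as the per-device stat/dd/mount checks below show, verifies that each remaining disk is a readable, unmounted block device. A condensed sketch of that filter, assuming the same four candidate devices; the real helper issues each check as a separate remote command:

    # Candidate scratch devices after dropping the root disk /dev/vda
    for dev in /dev/vdb /dev/vdc /dev/vdd /dev/vde; do
        stat "$dev" >/dev/null                        # exists as a block special file
        sudo dd if="$dev" of=/dev/null count=1        # first sector is readable
        ! mount | grep -v devtmpfs | grep -q "$dev"   # not mounted anywhere
    done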
2026-03-09T15:27:14.585 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vda 2026-03-09T15:27:14.585 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vdb 2026-03-09T15:27:14.585 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vdc 2026-03-09T15:27:14.585 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vdd 2026-03-09T15:27:14.585 INFO:teuthology.orchestra.run.vm05.stdout:/dev/vde 2026-03-09T15:27:14.585 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-09T15:27:14.585 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-09T15:27:14.585 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/vdb 2026-03-09T15:27:14.647 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/vdb 2026-03-09T15:27:14.647 INFO:teuthology.orchestra.run.vm05.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T15:27:14.647 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 254 Links: 1 Device type: fc,10 2026-03-09T15:27:14.647 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T15:27:14.647 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T15:27:14.647 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-03-09 15:26:55.222800244 +0000 2026-03-09T15:27:14.647 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-03-09 15:26:54.902799544 +0000 2026-03-09T15:27:14.647 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-03-09 15:26:54.902799544 +0000 2026-03-09T15:27:14.647 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-03-09 15:23:14.331000000 +0000 2026-03-09T15:27:14.647 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-09T15:27:14.720 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-03-09T15:27:14.720 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-03-09T15:27:14.720 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000109605 s, 4.7 MB/s 2026-03-09T15:27:14.721 DEBUG:teuthology.orchestra.run.vm05:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-09T15:27:14.780 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/vdc 2026-03-09T15:27:14.843 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/vdc 2026-03-09T15:27:14.843 INFO:teuthology.orchestra.run.vm05.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T15:27:14.843 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-09T15:27:14.843 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T15:27:14.843 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T15:27:14.843 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-03-09 15:26:55.297800408 +0000 2026-03-09T15:27:14.843 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-03-09 15:26:54.911799563 +0000 2026-03-09T15:27:14.843 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-03-09 15:26:54.911799563 +0000 2026-03-09T15:27:14.843 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-03-09 15:23:14.333000000 +0000 2026-03-09T15:27:14.843 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-09T15:27:14.913 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-03-09T15:27:14.914 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-03-09T15:27:14.914 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000180698 s, 2.8 MB/s 2026-03-09T15:27:14.915 DEBUG:teuthology.orchestra.run.vm05:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-09T15:27:14.982 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/vdd 2026-03-09T15:27:15.044 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/vdd 2026-03-09T15:27:15.044 INFO:teuthology.orchestra.run.vm05.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T15:27:15.044 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-09T15:27:15.044 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T15:27:15.044 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T15:27:15.044 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-03-09 15:26:55.392800616 +0000 2026-03-09T15:27:15.044 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-03-09 15:26:54.909799559 +0000 2026-03-09T15:27:15.044 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-03-09 15:26:54.909799559 +0000 2026-03-09T15:27:15.044 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-03-09 15:23:14.339000000 +0000 2026-03-09T15:27:15.045 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-09T15:27:15.123 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-03-09T15:27:15.123 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-03-09T15:27:15.123 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000189033 s, 2.7 MB/s 2026-03-09T15:27:15.124 DEBUG:teuthology.orchestra.run.vm05:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-09T15:27:15.193 DEBUG:teuthology.orchestra.run.vm05:> stat /dev/vde 2026-03-09T15:27:15.258 INFO:teuthology.orchestra.run.vm05.stdout: File: /dev/vde 2026-03-09T15:27:15.259 INFO:teuthology.orchestra.run.vm05.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T15:27:15.259 INFO:teuthology.orchestra.run.vm05.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-09T15:27:15.259 INFO:teuthology.orchestra.run.vm05.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T15:27:15.259 INFO:teuthology.orchestra.run.vm05.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T15:27:15.259 INFO:teuthology.orchestra.run.vm05.stdout:Access: 2026-03-09 15:26:55.461800767 +0000 2026-03-09T15:27:15.259 INFO:teuthology.orchestra.run.vm05.stdout:Modify: 2026-03-09 15:26:54.910799561 +0000 2026-03-09T15:27:15.259 INFO:teuthology.orchestra.run.vm05.stdout:Change: 2026-03-09 15:26:54.910799561 +0000 2026-03-09T15:27:15.259 INFO:teuthology.orchestra.run.vm05.stdout: Birth: 2026-03-09 15:23:14.344000000 +0000 2026-03-09T15:27:15.259 DEBUG:teuthology.orchestra.run.vm05:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-09T15:27:15.345 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records in 2026-03-09T15:27:15.345 INFO:teuthology.orchestra.run.vm05.stderr:1+0 records out 2026-03-09T15:27:15.345 INFO:teuthology.orchestra.run.vm05.stderr:512 bytes copied, 0.000179025 s, 2.9 MB/s 2026-03-09T15:27:15.346 DEBUG:teuthology.orchestra.run.vm05:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-09T15:27:15.378 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T15:27:15.378 DEBUG:teuthology.orchestra.run.vm09:> dd if=/scratch_devs of=/dev/stdout 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: from='client.24104 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm05=y;vm09=x", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: Saving service mgr spec with placement vm05=y;vm09=x;count:2 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 
ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:15.400 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:15.403 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-03-09T15:27:15.404 DEBUG:teuthology.orchestra.run.vm09:> ls /dev/[sv]d? 
2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='client.24104 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "placement": "2;vm05=y;vm09=x", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: Saving service mgr spec with placement vm05=y;vm09=x;count:2 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
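When the mgr spec is reconciled, the active mgr mints a keyring for the new daemon; the "auth get-or-create" dispatch for mgr.x above corresponds to a CLI call of roughly the following shape. This is an equivalent rendering of the JSON mon command in the journal, not a command the log shows verbatim:

    # Same entity and caps as the command dispatched by mgr.y above
    ceph auth get-or-create mgr.x \
        mon 'profile mgr' \
        osd 'allow *' \
        mds 'allow *'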
2026-03-09T15:27:15.515 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vda 2026-03-09T15:27:15.516 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdb 2026-03-09T15:27:15.516 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdc 2026-03-09T15:27:15.516 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vdd 2026-03-09T15:27:15.516 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vde 2026-03-09T15:27:15.516 WARNING:teuthology.misc:Removing root device: /dev/vda from device list 2026-03-09T15:27:15.516 DEBUG:teuthology.misc:devs=['/dev/vdb', '/dev/vdc', '/dev/vdd', '/dev/vde'] 2026-03-09T15:27:15.516 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdb 2026-03-09T15:27:15.574 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vdb 2026-03-09T15:27:15.574 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T15:27:15.574 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 246 Links: 1 Device type: fc,10 2026-03-09T15:27:15.574 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T15:27:15.574 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T15:27:15.575 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-09 15:26:59.528818017 +0000 2026-03-09T15:27:15.575 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-09 15:26:59.217817636 +0000 2026-03-09T15:27:15.575 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-09 15:26:59.217817636 +0000 2026-03-09T15:27:15.575 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-09 15:22:43.324000000 +0000 2026-03-09T15:27:15.575 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdb of=/dev/null count=1 2026-03-09T15:27:15.610 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-09T15:27:15.610 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-09T15:27:15.610 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000378908 s, 1.4 MB/s 2026-03-09T15:27:15.613 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdb 2026-03-09T15:27:15.734 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdc 2026-03-09T15:27:15.773 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vdc 2026-03-09T15:27:15.773 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T15:27:15.774 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 255 Links: 1 Device type: fc,20 2026-03-09T15:27:15.774 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T15:27:15.774 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T15:27:15.774 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-09 15:26:59.605818112 +0000 2026-03-09T15:27:15.774 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-09 15:26:59.226817647 +0000 2026-03-09T15:27:15.774 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-09 15:26:59.226817647 +0000 2026-03-09T15:27:15.774 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-09 15:22:43.335000000 +0000 2026-03-09T15:27:15.774 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdc of=/dev/null count=1 2026-03-09T15:27:15.880 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-09T15:27:15.880 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-09T15:27:15.880 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000135477 s, 3.8 MB/s 2026-03-09T15:27:15.881 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vdc 2026-03-09T15:27:15.903 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vdd 2026-03-09T15:27:15.984 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vdd 2026-03-09T15:27:15.984 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T15:27:15.984 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 256 Links: 1 Device type: fc,30 2026-03-09T15:27:15.984 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T15:27:15.984 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T15:27:15.984 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-09 15:26:59.677818200 +0000 2026-03-09T15:27:15.984 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-09 15:26:59.221817641 +0000 2026-03-09T15:27:15.984 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-09 15:26:59.221817641 +0000 2026-03-09T15:27:15.984 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-09 15:22:43.338000000 +0000 2026-03-09T15:27:15.984 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vdd of=/dev/null count=1 2026-03-09T15:27:16.051 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:15.983+0000 7ff7655b4000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:27:16.053 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-09T15:27:16.053 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-09T15:27:16.053 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000127591 s, 4.0 MB/s 2026-03-09T15:27:16.055 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/vdd 2026-03-09T15:27:16.119 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vde 2026-03-09T15:27:16.180 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vde 2026-03-09T15:27:16.180 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 512 block special file 2026-03-09T15:27:16.180 INFO:teuthology.orchestra.run.vm09.stdout:Device: 6h/6d Inode: 257 Links: 1 Device type: fc,40 2026-03-09T15:27:16.180 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-03-09T15:27:16.180 INFO:teuthology.orchestra.run.vm09.stdout:Context: system_u:object_r:fixed_disk_device_t:s0 2026-03-09T15:27:16.180 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-03-09 15:26:59.750818289 +0000 2026-03-09T15:27:16.180 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-03-09 15:26:59.223817643 +0000 2026-03-09T15:27:16.180 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-03-09 15:26:59.223817643 +0000 2026-03-09T15:27:16.180 INFO:teuthology.orchestra.run.vm09.stdout: Birth: 2026-03-09 15:22:43.342000000 +0000 2026-03-09T15:27:16.180 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vde of=/dev/null count=1 2026-03-09T15:27:16.249 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-03-09T15:27:16.249 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-03-09T15:27:16.249 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000348159 s, 1.5 MB/s 2026-03-09T15:27:16.251 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vde 2026-03-09T15:27:16.316 INFO:tasks.cephadm:Deploying osd.0 on vm05 with /dev/vde... 2026-03-09T15:27:16.316 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- lvm zap /dev/vde 2026-03-09T15:27:16.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:16 vm05 ceph-mon[49764]: Deploying daemon mgr.x on vm09 2026-03-09T15:27:16.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:16 vm05 ceph-mon[49764]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:16.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:16 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:16.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:16 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:16.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:16 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:27:16.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:16 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:27:16.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:16 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:16.792 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:16.391+0000 7ff7655b4000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 
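The vm09 commands above are teuthology's scratch-device screening: it lists /dev/vd*, drops the root device (/dev/vda), and for each remaining disk verifies that it exists as a block special file, that its first sector is readable, and that it is not mounted. Condensed into one loop over the devices seen in this run (a sketch of the same three checks, not the harness's actual code):

    for dev in /dev/vdb /dev/vdc /dev/vdd /dev/vde; do
        stat "$dev"                                   # block special file must exist
        sudo dd if="$dev" of=/dev/null count=1        # first sector must be readable
        ! mount | grep -v devtmpfs | grep -q "$dev"   # device must not be mounted
    done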
2026-03-09T15:27:16.793 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:16.546+0000 7ff7655b4000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:27:16.793 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:16.602+0000 7ff7655b4000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:27:16.793 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:16 vm09 ceph-mon[49358]: Deploying daemon mgr.x on vm09 2026-03-09T15:27:16.793 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:16 vm09 ceph-mon[49358]: pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:16.793 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:16 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:16.793 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:16 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:16.793 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:16 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:27:16.793 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:16 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:27:16.793 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:16 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:16.999 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:27:17.034 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch daemon add osd vm05:/dev/vde 2026-03-09T15:27:17.061 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:16.791+0000 7ff7655b4000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:27:17.508 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:17 vm05 ceph-mon[49764]: Reconfiguring mgr.y (unknown last config time)... 
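Each OSD is brought up with the same two-step sequence visible in the DEBUG:teuthology.orchestra.run.vm05 lines: the device is first wiped with ceph-volume lvm zap and then handed to the orchestrator with "ceph orch daemon add osd". For osd.0 on /dev/vde the pair of host commands is (copied from the log; the image tag and fsid are the ones bootstrapped earlier in this job):

    sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- lvm zap /dev/vde
    sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell \
        -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch daemon add osd vm05:/dev/vde

The interleaved "Module ... has missing NOTIFY_TYPES member" messages come from the freshly started v17.2.0 mgr.x loading its Python modules; they appear to be module-load warnings from that image rather than part of the OSD deployment.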
2026-03-09T15:27:17.508 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:17 vm05 ceph-mon[49764]: Reconfiguring daemon mgr.y on vm05 2026-03-09T15:27:17.508 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:17.508 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:17.508 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:17.508 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:17.509 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:17.509 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:17.658 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:17.458+0000 7ff7655b4000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:27:17.658 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:17 vm09 ceph-mon[49358]: Reconfiguring mgr.y (unknown last config time)... 2026-03-09T15:27:17.658 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:17 vm09 ceph-mon[49358]: Reconfiguring daemon mgr.y on vm05 2026-03-09T15:27:17.658 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:17.658 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:17.658 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:17.658 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:17.658 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:17.658 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:17.913 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:17.657+0000 7ff7655b4000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:27:17.913 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:17.719+0000 7ff7655b4000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:27:17.914 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:17 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:17.781+0000 7ff7655b4000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:27:17.914 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:17.850+0000 7ff7655b4000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:27:18.224 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:17.912+0000 7ff7655b4000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:27:18.519 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:18 vm05 ceph-mon[49764]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:18.519 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:18 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:27:18.520 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:18 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:27:18.520 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:18 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:18.520 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:18 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2837201718' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "04a1f096-2671-4227-83a4-258146ba498d"}]: dispatch 2026-03-09T15:27:18.520 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:18 vm05 ceph-mon[49764]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "04a1f096-2671-4227-83a4-258146ba498d"}]: dispatch 2026-03-09T15:27:18.520 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:18 vm05 ceph-mon[49764]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "04a1f096-2671-4227-83a4-258146ba498d"}]': finished 2026-03-09T15:27:18.520 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:18 vm05 ceph-mon[49764]: osdmap e5: 1 total, 0 up, 1 in 2026-03-09T15:27:18.520 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:18 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:27:18.562 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:18.223+0000 7ff7655b4000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:27:18.562 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:18.296+0000 7ff7655b4000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:27:18.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:18 vm09 ceph-mon[49358]: pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:18.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:18 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:27:18.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:18 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:27:18.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:18 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:18.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:18 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2837201718' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "04a1f096-2671-4227-83a4-258146ba498d"}]: dispatch 2026-03-09T15:27:18.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:18 vm09 ceph-mon[49358]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "04a1f096-2671-4227-83a4-258146ba498d"}]: dispatch 2026-03-09T15:27:18.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:18 vm09 ceph-mon[49358]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "04a1f096-2671-4227-83a4-258146ba498d"}]': finished 2026-03-09T15:27:18.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:18 vm09 ceph-mon[49358]: osdmap e5: 1 total, 0 up, 1 in 2026-03-09T15:27:18.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:18 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:27:19.133 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:18.877+0000 7ff7655b4000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:27:19.133 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:18.941+0000 7ff7655b4000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T15:27:19.133 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:19 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:19.007+0000 7ff7655b4000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:27:19.404 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:19 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:19.132+0000 7ff7655b4000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T15:27:19.404 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:19 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:19.193+0000 7ff7655b4000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:27:19.404 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:19 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:19.292+0000 7ff7655b4000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:27:19.404 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:19 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:19.380+0000 7ff7655b4000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:27:19.525 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:19 vm05 ceph-mon[49764]: from='client.14205 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:19.525 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:19 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3446800218' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:27:19.694 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:19 vm09 ceph-mon[49358]: from='client.14205 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:19.694 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:19 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/3446800218' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:27:20.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:19 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:19.693+0000 7ff7655b4000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T15:27:20.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:27:19 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:27:19.759+0000 7ff7655b4000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:27:20.531 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:20 vm05 ceph-mon[49764]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:20.531 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:20 vm05 ceph-mon[49764]: Standby manager daemon x started 2026-03-09T15:27:20.531 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:20 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/1169762928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:27:20.531 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:20 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/1169762928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:27:20.531 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:20 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/1169762928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:27:20.531 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:20 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/1169762928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:27:20.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:20 vm09 ceph-mon[49358]: pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:20.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:20 vm09 ceph-mon[49358]: Standby manager daemon x started 2026-03-09T15:27:20.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:20 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/1169762928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:27:20.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:20 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/1169762928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:27:20.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:20 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/1169762928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:27:20.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:20 vm09 ceph-mon[49358]: from='mgr.? 
192.168.123.109:0/1169762928' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:27:21.545 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:21 vm05 ceph-mon[49764]: mgrmap e14: y(active, since 29s), standbys: x 2026-03-09T15:27:21.545 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:21 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:27:21.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:21 vm09 ceph-mon[49358]: mgrmap e14: y(active, since 29s), standbys: x 2026-03-09T15:27:21.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:21 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:27:22.546 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:22 vm05 ceph-mon[49764]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:22.546 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:22 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T15:27:22.546 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:22 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:22.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:22 vm09 ceph-mon[49358]: pgmap v10: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:22.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:22 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T15:27:22.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:22 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:23.558 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:23 vm05 ceph-mon[49764]: Deploying daemon osd.0 on vm05 2026-03-09T15:27:23.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:23 vm09 ceph-mon[49358]: Deploying daemon osd.0 on vm05 2026-03-09T15:27:24.572 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:24 vm05 ceph-mon[49764]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:24.572 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:24 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:24.572 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:24 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:24.572 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:24 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:24.572 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:24 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:24.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:24 vm09 ceph-mon[49358]: pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:24.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:24 
vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:24.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:24 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:24.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:24 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:24.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:24 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:24.915 INFO:teuthology.orchestra.run.vm05.stdout:Created osd(s) 0 on host 'vm05' 2026-03-09T15:27:24.980 DEBUG:teuthology.orchestra.run.vm05:osd.0> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.0.service 2026-03-09T15:27:24.981 INFO:tasks.cephadm:Deploying osd.1 on vm05 with /dev/vdd... 2026-03-09T15:27:24.981 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- lvm zap /dev/vdd 2026-03-09T15:27:25.830 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:25 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:25.830 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:25 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:25.830 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:25 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:25.830 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:25 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:25.831 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:25 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:25.831 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:25 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:25.831 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:25 vm05 ceph-mon[49764]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:26.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:25 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:26.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:25 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:26.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:25 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:26.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:25 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:26.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:25 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": 
"config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:26.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:25 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:26.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:25 vm09 ceph-mon[49358]: pgmap v12: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:26.083 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:27:25 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0[57406]: 2026-03-09T15:27:25.928+0000 7f44171e83c0 -1 osd.0 0 log_to_monitors true 2026-03-09T15:27:26.720 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:27:26.740 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch daemon add osd vm05:/dev/vdd 2026-03-09T15:27:27.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:26 vm05 ceph-mon[49764]: from='osd.0 [v2:192.168.123.105:6802/1831508895,v1:192.168.123.105:6803/1831508895]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T15:27:27.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:26 vm05 ceph-mon[49764]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T15:27:27.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:26 vm05 ceph-mon[49764]: Detected new or changed devices on vm05 2026-03-09T15:27:27.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:26 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:27.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:26 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:27:27.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:26 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:27.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:26 vm09 ceph-mon[49358]: from='osd.0 [v2:192.168.123.105:6802/1831508895,v1:192.168.123.105:6803/1831508895]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T15:27:27.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:26 vm09 ceph-mon[49358]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T15:27:27.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:26 vm09 ceph-mon[49358]: Detected new or changed devices on vm05 2026-03-09T15:27:27.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:26 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:27.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:26 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:27:27.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:26 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:27 vm05 ceph-mon[49764]: 
from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-09T15:27:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:27 vm05 ceph-mon[49764]: osdmap e6: 1 total, 0 up, 1 in 2026-03-09T15:27:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:27 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:27:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:27 vm05 ceph-mon[49764]: from='osd.0 [v2:192.168.123.105:6802/1831508895,v1:192.168.123.105:6803/1831508895]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:27 vm05 ceph-mon[49764]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:27 vm05 ceph-mon[49764]: from='client.24136 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:27 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:27:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:27 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:27:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:27 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:27 vm05 ceph-mon[49764]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:28.237 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:27:27 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0[57406]: 2026-03-09T15:27:27.938+0000 7f440dbeb700 -1 osd.0 0 waiting for initial osdmap 2026-03-09T15:27:28.237 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:27:27 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0[57406]: 2026-03-09T15:27:27.944+0000 7f4409584700 -1 osd.0 7 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:27:28.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:27 vm09 ceph-mon[49358]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-09T15:27:28.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:27 vm09 ceph-mon[49358]: osdmap e6: 1 total, 0 up, 1 in 2026-03-09T15:27:28.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:27 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:27:28.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:27 vm09 ceph-mon[49358]: from='osd.0 [v2:192.168.123.105:6802/1831508895,v1:192.168.123.105:6803/1831508895]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 
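The mon.a excerpt above shows what a new OSD does on first boot: it sets its device class and then places itself in the CRUSH hierarchy under its host. Expressed as the equivalent CLI calls (ids and arguments taken from the dispatched commands in the log; the weight 0.0195 is what osd.0 reports for its ~20 GiB virtual disk, CRUSH weights being expressed in TiB):

    ceph osd crush set-device-class hdd osd.0
    ceph osd crush create-or-move osd.0 0.0195 host=vm05 root=default

The mon.b lines that follow repeat the same events from the second monitor, and the "osd.0 ... boot" / "osdmap e8: 2 total, 1 up, 2 in" messages confirm the map updating as the daemon comes up while osd.1 is being created.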
2026-03-09T15:27:28.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:27 vm09 ceph-mon[49358]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:28.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:27 vm09 ceph-mon[49358]: from='client.24136 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:28.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:27 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:27:28.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:27 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:27:28.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:27 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:28.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:27 vm09 ceph-mon[49358]: pgmap v14: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: purged_snaps scrub starts 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: purged_snaps scrub ok 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: osdmap e7: 1 total, 0 up, 1 in 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3489249477' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a281303c-8662-4f54-8846-33be08391553"}]: dispatch 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a281303c-8662-4f54-8846-33be08391553"}]: dispatch 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: osd.0 [v2:192.168.123.105:6802/1831508895,v1:192.168.123.105:6803/1831508895] boot 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a281303c-8662-4f54-8846-33be08391553"}]': finished 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: osdmap e8: 2 total, 1 up, 2 in 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:28 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1922470636' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:27:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: purged_snaps scrub starts 2026-03-09T15:27:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: purged_snaps scrub ok 2026-03-09T15:27:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-09T15:27:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: osdmap e7: 1 total, 0 up, 1 in 2026-03-09T15:27:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:27:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:27:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3489249477' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a281303c-8662-4f54-8846-33be08391553"}]: dispatch 2026-03-09T15:27:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "a281303c-8662-4f54-8846-33be08391553"}]: dispatch 2026-03-09T15:27:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: osd.0 [v2:192.168.123.105:6802/1831508895,v1:192.168.123.105:6803/1831508895] boot 2026-03-09T15:27:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "a281303c-8662-4f54-8846-33be08391553"}]': finished 2026-03-09T15:27:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: osdmap e8: 2 total, 1 up, 2 in 2026-03-09T15:27:29.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:27:29.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:29.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:28 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/1922470636' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:27:30.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:29 vm05 ceph-mon[49764]: pgmap v17: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:27:30.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:29 vm05 ceph-mon[54361]: pgmap v17: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:27:30.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:29 vm09 ceph-mon[49358]: pgmap v17: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:27:31.619 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:31 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T15:27:31.619 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:31 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:31.619 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:31 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T15:27:31.619 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:31 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:31.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:31 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T15:27:31.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:31 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:32.682 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:32 vm05 ceph-mon[54361]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:27:32.682 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:32 vm05 ceph-mon[54361]: Deploying daemon osd.1 on vm05 2026-03-09T15:27:32.682 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:32 vm05 ceph-mon[49764]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:27:32.682 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:32 vm05 ceph-mon[49764]: Deploying daemon osd.1 on vm05 2026-03-09T15:27:32.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:32 vm09 ceph-mon[49358]: pgmap v18: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:27:32.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:32 vm09 ceph-mon[49358]: Deploying daemon osd.1 on vm05 2026-03-09T15:27:33.667 INFO:teuthology.orchestra.run.vm05.stdout:Created osd(s) 1 on host 'vm05' 2026-03-09T15:27:33.741 DEBUG:teuthology.orchestra.run.vm05:osd.1> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.1.service 2026-03-09T15:27:33.743 INFO:tasks.cephadm:Deploying osd.2 on vm05 with /dev/vdc... 
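After each "Created osd(s) N on host 'vm05'" acknowledgement the harness attaches a journald follower to the new daemon's systemd unit and then moves on to the next scratch device, so the zap/add cycle shown for /dev/vde repeats for /dev/vdd and /dev/vdc. The follower is one journalctl per unit, named after the cluster fsid and daemon id (taken verbatim from the DEBUG line above):

    sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.1.service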
2026-03-09T15:27:33.743 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- lvm zap /dev/vdc 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[49764]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[54361]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[54361]: from='mgr.14152 
192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:33.879 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:33 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:34.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:33 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:34.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:33 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:34.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:33 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:34.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:33 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:34.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:33 vm09 ceph-mon[49358]: pgmap v19: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:27:34.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:33 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:34.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:33 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:34.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:33 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:34.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:33 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:34.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:33 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:34.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:33 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:34.735 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:27:34 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1[60125]: 2026-03-09T15:27:34.472+0000 7fa4e4da83c0 -1 osd.1 0 log_to_monitors true 2026-03-09T15:27:35.173 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:34 vm05 ceph-mon[49764]: from='osd.1 
[v2:192.168.123.105:6810/3783980181,v1:192.168.123.105:6811/3783980181]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T15:27:35.173 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:34 vm05 ceph-mon[49764]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T15:27:35.173 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:34 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:35.173 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:34 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:27:35.173 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:34 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:35.173 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:34 vm05 ceph-mon[54361]: from='osd.1 [v2:192.168.123.105:6810/3783980181,v1:192.168.123.105:6811/3783980181]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T15:27:35.173 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:34 vm05 ceph-mon[54361]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T15:27:35.173 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:34 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:35.173 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:34 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:27:35.173 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:34 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:35.214 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:27:35.228 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch daemon add osd vm05:/dev/vdc 2026-03-09T15:27:35.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:34 vm09 ceph-mon[49358]: from='osd.1 [v2:192.168.123.105:6810/3783980181,v1:192.168.123.105:6811/3783980181]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T15:27:35.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:34 vm09 ceph-mon[49358]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T15:27:35.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:34 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:35.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:34 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:27:35.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:34 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:35.970 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[49764]: Detected new or changed devices on vm05 2026-03-09T15:27:35.970 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[49764]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T15:27:35.970 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[49764]: osdmap e9: 2 total, 1 up, 2 in 2026-03-09T15:27:35.970 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:35.970 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[49764]: from='osd.1 [v2:192.168.123.105:6810/3783980181,v1:192.168.123.105:6811/3783980181]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:35.970 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[49764]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:35.970 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[49764]: pgmap v21: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:27:35.970 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:27:35.970 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:27:35.970 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:35.970 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[54361]: Detected new or changed devices on vm05 2026-03-09T15:27:35.970 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[54361]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T15:27:35.970 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[54361]: osdmap e9: 2 total, 1 up, 2 in 2026-03-09T15:27:35.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:35.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[54361]: from='osd.1 [v2:192.168.123.105:6810/3783980181,v1:192.168.123.105:6811/3783980181]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:35.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[54361]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:35.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[54361]: pgmap v21: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 
GiB avail 2026-03-09T15:27:35.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:27:35.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:27:35.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:35 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:36.235 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:27:35 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1[60125]: 2026-03-09T15:27:35.971+0000 7fa4db7ab700 -1 osd.1 0 waiting for initial osdmap 2026-03-09T15:27:36.235 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:27:36 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1[60125]: 2026-03-09T15:27:35.996+0000 7fa4d5941700 -1 osd.1 10 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:27:36.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:35 vm09 ceph-mon[49358]: Detected new or changed devices on vm05 2026-03-09T15:27:36.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:35 vm09 ceph-mon[49358]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T15:27:36.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:35 vm09 ceph-mon[49358]: osdmap e9: 2 total, 1 up, 2 in 2026-03-09T15:27:36.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:35 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:36.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:35 vm09 ceph-mon[49358]: from='osd.1 [v2:192.168.123.105:6810/3783980181,v1:192.168.123.105:6811/3783980181]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:36.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:35 vm09 ceph-mon[49358]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:36.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:35 vm09 ceph-mon[49358]: pgmap v21: 0 pgs: ; 0 B data, 4.8 MiB used, 20 GiB / 20 GiB avail 2026-03-09T15:27:36.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:35 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:27:36.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:35 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:27:36.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:35 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[49764]: from='client.24151 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", 
"svc_arg": "vm05:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[49764]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[49764]: osdmap e10: 2 total, 1 up, 2 in 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/914873242' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "3556c187-377e-47cc-8f72-be4edaa111a4"}]: dispatch 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[49764]: osd.1 [v2:192.168.123.105:6810/3783980181,v1:192.168.123.105:6811/3783980181] boot 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/914873242' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "3556c187-377e-47cc-8f72-be4edaa111a4"}]': finished 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[49764]: osdmap e11: 3 total, 2 up, 3 in 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[54361]: from='client.24151 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[54361]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[54361]: osdmap e10: 2 total, 1 up, 2 in 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/914873242' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "3556c187-377e-47cc-8f72-be4edaa111a4"}]: dispatch 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[54361]: osd.1 [v2:192.168.123.105:6810/3783980181,v1:192.168.123.105:6811/3783980181] boot 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/914873242' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "3556c187-377e-47cc-8f72-be4edaa111a4"}]': finished 2026-03-09T15:27:37.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[54361]: osdmap e11: 3 total, 2 up, 3 in 2026-03-09T15:27:37.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:37.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:36 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:36 vm09 ceph-mon[49358]: from='client.24151 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:36 vm09 ceph-mon[49358]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-09T15:27:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:36 vm09 ceph-mon[49358]: osdmap e10: 2 total, 1 up, 2 in 2026-03-09T15:27:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:36 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:36 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:36 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/914873242' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "3556c187-377e-47cc-8f72-be4edaa111a4"}]: dispatch 2026-03-09T15:27:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:36 vm09 ceph-mon[49358]: osd.1 [v2:192.168.123.105:6810/3783980181,v1:192.168.123.105:6811/3783980181] boot 2026-03-09T15:27:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:36 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/914873242' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "3556c187-377e-47cc-8f72-be4edaa111a4"}]': finished 2026-03-09T15:27:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:36 vm09 ceph-mon[49358]: osdmap e11: 3 total, 2 up, 3 in 2026-03-09T15:27:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:36 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:27:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:36 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:38.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:37 vm05 ceph-mon[49764]: purged_snaps scrub starts 2026-03-09T15:27:38.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:37 vm05 ceph-mon[49764]: purged_snaps scrub ok 2026-03-09T15:27:38.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:37 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1739568586' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:27:38.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:37 vm05 ceph-mon[49764]: pgmap v24: 0 pgs: ; 0 B data, 9.5 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:38.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:37 vm05 ceph-mon[54361]: purged_snaps scrub starts 2026-03-09T15:27:38.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:37 vm05 ceph-mon[54361]: purged_snaps scrub ok 2026-03-09T15:27:38.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:37 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1739568586' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:27:38.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:37 vm05 ceph-mon[54361]: pgmap v24: 0 pgs: ; 0 B data, 9.5 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:38.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:37 vm09 ceph-mon[49358]: purged_snaps scrub starts 2026-03-09T15:27:38.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:37 vm09 ceph-mon[49358]: purged_snaps scrub ok 2026-03-09T15:27:38.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:37 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/1739568586' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:27:38.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:37 vm09 ceph-mon[49358]: pgmap v24: 0 pgs: ; 0 B data, 9.5 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:40.537 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:40 vm05 ceph-mon[49764]: pgmap v25: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:40.537 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:40 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T15:27:40.538 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:40 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:40.538 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:40 vm05 ceph-mon[54361]: pgmap v25: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:40.538 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:40 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T15:27:40.538 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:40 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:40.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:40 vm09 ceph-mon[49358]: pgmap v25: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:40.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:40 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T15:27:40.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:40 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:41.557 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:41 vm05 ceph-mon[49764]: Deploying daemon osd.2 on vm05 2026-03-09T15:27:41.557 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:41 vm05 ceph-mon[54361]: Deploying daemon osd.2 on vm05 2026-03-09T15:27:41.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:41 vm09 ceph-mon[49358]: Deploying daemon osd.2 on vm05 2026-03-09T15:27:42.252 INFO:teuthology.orchestra.run.vm05.stdout:Created osd(s) 2 on host 'vm05' 2026-03-09T15:27:42.311 DEBUG:teuthology.orchestra.run.vm05:osd.2> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.2.service 2026-03-09T15:27:42.313 INFO:tasks.cephadm:Deploying osd.3 on vm05 with /dev/vdb... 
2026-03-09T15:27:42.313 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- lvm zap /dev/vdb 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[49764]: pgmap v26: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[54361]: pgmap v26: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[54361]: from='mgr.14152 
192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:42 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:42.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:42 vm09 ceph-mon[49358]: pgmap v26: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:42.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:42 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:42.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:42 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:42.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:42 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:42.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:42 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:42.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:42 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:42.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:42 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:42.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:42 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:42.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:42 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:42.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:42 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:42.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:42 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:42.985 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:27:42 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2[62868]: 2026-03-09T15:27:42.607+0000 7f3350bfd3c0 -1 osd.2 0 log_to_monitors true 2026-03-09T15:27:43.738 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:43 vm05 ceph-mon[49764]: from='osd.2 
[v2:192.168.123.105:6818/2261056004,v1:192.168.123.105:6819/2261056004]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T15:27:43.738 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:43 vm05 ceph-mon[49764]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T15:27:43.738 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:43 vm05 ceph-mon[54361]: from='osd.2 [v2:192.168.123.105:6818/2261056004,v1:192.168.123.105:6819/2261056004]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T15:27:43.738 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:43 vm05 ceph-mon[54361]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T15:27:43.809 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:27:43.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:43 vm09 ceph-mon[49358]: from='osd.2 [v2:192.168.123.105:6818/2261056004,v1:192.168.123.105:6819/2261056004]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T15:27:43.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:43 vm09 ceph-mon[49358]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T15:27:43.821 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch daemon add osd vm05:/dev/vdb 2026-03-09T15:27:44.520 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[49764]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:44.520 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[49764]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T15:27:44.520 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[49764]: osdmap e12: 3 total, 2 up, 3 in 2026-03-09T15:27:44.520 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:44.520 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[49764]: from='osd.2 [v2:192.168.123.105:6818/2261056004,v1:192.168.123.105:6819/2261056004]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:44.520 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[49764]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[49764]: Detected new or changed devices on vm05 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", 
"who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[54361]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[54361]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[54361]: osdmap e12: 3 total, 2 up, 3 in 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[54361]: from='osd.2 [v2:192.168.123.105:6818/2261056004,v1:192.168.123.105:6819/2261056004]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[54361]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[54361]: Detected new or changed devices on vm05 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:27:44.521 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:44 vm05 ceph-mon[54361]: from='mgr.14152 
192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:44.521 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:27:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2[62868]: 2026-03-09T15:27:44.441+0000 7f3347600700 -1 osd.2 0 waiting for initial osdmap 2026-03-09T15:27:44.521 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:27:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2[62868]: 2026-03-09T15:27:44.454+0000 7f3343f9b700 -1 osd.2 13 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:27:44.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:44 vm09 ceph-mon[49358]: pgmap v27: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:44.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:44 vm09 ceph-mon[49358]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T15:27:44.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:44 vm09 ceph-mon[49358]: osdmap e12: 3 total, 2 up, 3 in 2026-03-09T15:27:44.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:44 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:44.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:44 vm09 ceph-mon[49358]: from='osd.2 [v2:192.168.123.105:6818/2261056004,v1:192.168.123.105:6819/2261056004]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:44.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:44 vm09 ceph-mon[49358]: from='osd.2 ' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:44.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:44 vm09 ceph-mon[49358]: Detected new or changed devices on vm05 2026-03-09T15:27:44.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:44 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:44.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:44 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:27:44.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:44 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:44.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:44 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:27:44.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:44 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:27:44.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:44 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[49764]: from='client.14262 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdb", 
"target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[49764]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[49764]: osdmap e13: 3 total, 2 up, 3 in 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2162154143' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fe8044ac-8ad4-4057-a7f5-e8e77db769c3"}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[49764]: osd.2 [v2:192.168.123.105:6818/2261056004,v1:192.168.123.105:6819/2261056004] boot 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2162154143' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "fe8044ac-8ad4-4057-a7f5-e8e77db769c3"}]': finished 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[49764]: osdmap e14: 4 total, 3 up, 4 in 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1672392167' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[54361]: from='client.14262 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[54361]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[54361]: osdmap e13: 3 total, 2 up, 3 in 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/2162154143' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fe8044ac-8ad4-4057-a7f5-e8e77db769c3"}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[54361]: osd.2 [v2:192.168.123.105:6818/2261056004,v1:192.168.123.105:6819/2261056004] boot 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2162154143' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "fe8044ac-8ad4-4057-a7f5-e8e77db769c3"}]': finished 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[54361]: osdmap e14: 4 total, 3 up, 4 in 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:45 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1672392167' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:27:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:45 vm09 ceph-mon[49358]: from='client.14262 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm05:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:45 vm09 ceph-mon[49358]: from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-09T15:27:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:45 vm09 ceph-mon[49358]: osdmap e13: 3 total, 2 up, 3 in 2026-03-09T15:27:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:45 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:45 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:45 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2162154143' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "fe8044ac-8ad4-4057-a7f5-e8e77db769c3"}]: dispatch 2026-03-09T15:27:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:45 vm09 ceph-mon[49358]: osd.2 [v2:192.168.123.105:6818/2261056004,v1:192.168.123.105:6819/2261056004] boot 2026-03-09T15:27:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:45 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/2162154143' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "fe8044ac-8ad4-4057-a7f5-e8e77db769c3"}]': finished 2026-03-09T15:27:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:45 vm09 ceph-mon[49358]: osdmap e14: 4 total, 3 up, 4 in 2026-03-09T15:27:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:45 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:27:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:45 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:45 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1672392167' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[49764]: purged_snaps scrub starts 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[49764]: purged_snaps scrub ok 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[49764]: pgmap v31: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[49764]: osdmap e15: 4 total, 3 up, 4 in 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[54361]: purged_snaps scrub starts 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[54361]: purged_snaps scrub ok 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[54361]: pgmap v31: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 
32}]': finished 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[54361]: osdmap e15: 4 total, 3 up, 4 in 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:46.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:46 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-09T15:27:47.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:46 vm09 ceph-mon[49358]: purged_snaps scrub starts 2026-03-09T15:27:47.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:46 vm09 ceph-mon[49358]: purged_snaps scrub ok 2026-03-09T15:27:47.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:46 vm09 ceph-mon[49358]: pgmap v31: 0 pgs: ; 0 B data, 9.6 MiB used, 40 GiB / 40 GiB avail 2026-03-09T15:27:47.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:46 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]: dispatch 2026-03-09T15:27:47.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:46 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32}]': finished 2026-03-09T15:27:47.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:46 vm09 ceph-mon[49358]: osdmap e15: 4 total, 3 up, 4 in 2026-03-09T15:27:47.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:46 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:47.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:46 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]: dispatch 2026-03-09T15:27:48.178 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:47 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-09T15:27:48.178 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:47 vm05 ceph-mon[49764]: osdmap e16: 4 total, 3 up, 4 in 2026-03-09T15:27:48.178 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:47 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:48.178 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:47 vm05 ceph-mon[49764]: pgmap v34: 1 pgs: 1 creating+activating; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-09T15:27:48.179 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:47 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-09T15:27:48.179 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:47 vm05 ceph-mon[54361]: osdmap e16: 4 
total, 3 up, 4 in 2026-03-09T15:27:48.179 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:47 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:48.179 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:47 vm05 ceph-mon[54361]: pgmap v34: 1 pgs: 1 creating+activating; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-09T15:27:48.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:47 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished 2026-03-09T15:27:48.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:47 vm09 ceph-mon[49358]: osdmap e16: 4 total, 3 up, 4 in 2026-03-09T15:27:48.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:47 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:48.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:47 vm09 ceph-mon[49358]: pgmap v34: 1 pgs: 1 creating+activating; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-09T15:27:49.030 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:48 vm05 ceph-mon[49764]: osdmap e17: 4 total, 3 up, 4 in 2026-03-09T15:27:49.030 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:48 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:49.030 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:48 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T15:27:49.030 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:48 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:49.030 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:48 vm05 ceph-mon[49764]: Deploying daemon osd.3 on vm05 2026-03-09T15:27:49.031 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:48 vm05 ceph-mon[54361]: osdmap e17: 4 total, 3 up, 4 in 2026-03-09T15:27:49.031 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:48 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:49.031 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:48 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T15:27:49.031 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:48 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:49.031 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:48 vm05 ceph-mon[54361]: Deploying daemon osd.3 on vm05 2026-03-09T15:27:49.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:48 vm09 ceph-mon[49358]: osdmap e17: 4 total, 3 up, 4 in 2026-03-09T15:27:49.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:48 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:49.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:48 vm09 ceph-mon[49358]: 
from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T15:27:49.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:48 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:49.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:48 vm09 ceph-mon[49358]: Deploying daemon osd.3 on vm05 2026-03-09T15:27:49.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:49 vm05 ceph-mon[49764]: pgmap v36: 1 pgs: 1 creating+activating; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-09T15:27:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:49 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:49.986 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:27:49 vm05 sudo[65608]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vde 2026-03-09T15:27:49.986 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:27:49 vm05 sudo[65608]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T15:27:49.986 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:27:49 vm05 sudo[65608]: pam_unix(sudo:session): session closed for user root 2026-03-09T15:27:49.986 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:27:49 vm05 sudo[65675]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdd 2026-03-09T15:27:49.986 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:27:49 vm05 sudo[65675]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T15:27:49.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:49 vm05 ceph-mon[54361]: pgmap v36: 1 pgs: 1 creating+activating; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-09T15:27:49.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:49 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:50.252 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:27:49 vm05 sudo[65675]: pam_unix(sudo:session): session closed for user root 2026-03-09T15:27:50.253 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:27:50 vm05 sudo[65813]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vdc 2026-03-09T15:27:50.253 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:27:50 vm05 sudo[65813]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T15:27:50.253 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:27:50 vm05 sudo[65813]: pam_unix(sudo:session): session closed for user root 2026-03-09T15:27:50.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:49 vm09 ceph-mon[49358]: pgmap v36: 1 pgs: 1 creating+activating; 0 B data, 15 MiB used, 60 GiB / 60 GiB avail 2026-03-09T15:27:50.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:49 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:50.620 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 sudo[65884]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-09T15:27:50.620 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 sudo[65884]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T15:27:50.620 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 sudo[65884]: pam_unix(sudo:session): session closed for user root 2026-03-09T15:27:50.908 
INFO:teuthology.orchestra.run.vm05.stdout:Created osd(s) 3 on host 'vm05' 2026-03-09T15:27:50.908 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 sudo[65957]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-09T15:27:50.908 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 sudo[65957]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T15:27:50.908 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 sudo[65957]: pam_unix(sudo:session): session closed for user root 2026-03-09T15:27:50.985 DEBUG:teuthology.orchestra.run.vm05:osd.3> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.3.service 2026-03-09T15:27:50.989 INFO:tasks.cephadm:Deploying osd.4 on vm09 with /dev/vde... 2026-03-09T15:27:50.989 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- lvm zap /dev/vde 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 sudo[50935]: ceph : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/smartctl -x --json=o /dev/vda 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 sudo[50935]: pam_unix(sudo:session): session opened for user root by (uid=0) 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 sudo[50935]: pam_unix(sudo:session): session closed for user root 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='mgr.14152 
192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T15:27:51.129 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:50 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 
2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:51.160 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' 
entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-03-09T15:27:51.161 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:50 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:51.531 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T15:27:51.547 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch daemon add osd vm09:/dev/vde 2026-03-09T15:27:51.735 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:27:51 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3[65656]: 2026-03-09T15:27:51.571+0000 7f209e37f3c0 -1 osd.3 0 log_to_monitors true 2026-03-09T15:27:52.391 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:52 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:52.391 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:52 vm09 ceph-mon[49358]: pgmap v37: 1 pgs: 1 creating+activating; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-09T15:27:52.391 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:52 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:27:52.391 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:52 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:27:52.391 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:52 vm09 ceph-mon[49358]: from='osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T15:27:52.391 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:52 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:27:52.391 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:52 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:27:52.391 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:52 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[49764]: pgmap v37: 1 pgs: 1 creating+activating; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[49764]: from='osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[54361]: pgmap v37: 1 pgs: 1 creating+activating; 0 B data, 16 MiB used, 60 GiB / 60 GiB avail 
2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[54361]: from='osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:27:52.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:52 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:52.985 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:27:52 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3[65656]: 2026-03-09T15:27:52.731+0000 7f2094d82700 -1 osd.3 0 waiting for initial osdmap 2026-03-09T15:27:52.985 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:27:52 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3[65656]: 2026-03-09T15:27:52.736+0000 7f208ef18700 -1 osd.3 19 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='client.24199 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: mgrmap e15: y(active, since 60s), standbys: x 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: Detected new or changed devices on vm05 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: osdmap e18: 4 total, 3 up, 4 in 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='client.? 192.168.123.109:0/2259936478' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c4e71d46-f1e6-4cdb-b025-41a19b086c9f"}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c4e71d46-f1e6-4cdb-b025-41a19b086c9f"}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "c4e71d46-f1e6-4cdb-b025-41a19b086c9f"}]': finished 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: osdmap e19: 5 total, 3 up, 5 in 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='client.24199 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: mgrmap e15: y(active, since 60s), standbys: x 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: Detected new or changed devices on vm05 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: osdmap e18: 4 total, 3 up, 4 in 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 
15:27:53 vm05 ceph-mon[54361]: from='osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='client.? 192.168.123.109:0/2259936478' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c4e71d46-f1e6-4cdb-b025-41a19b086c9f"}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c4e71d46-f1e6-4cdb-b025-41a19b086c9f"}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "c4e71d46-f1e6-4cdb-b025-41a19b086c9f"}]': finished 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: osdmap e19: 5 total, 3 up, 5 in 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:27:53.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:53 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='client.24199 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vde", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: mgrmap e15: y(active, since 60s), standbys: x 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: Detected new or changed devices on vm05 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='mgr.14152 
192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: osdmap e18: 4 total, 3 up, 4 in 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='client.? 192.168.123.109:0/2259936478' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c4e71d46-f1e6-4cdb-b025-41a19b086c9f"}]: dispatch 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "c4e71d46-f1e6-4cdb-b025-41a19b086c9f"}]: dispatch 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm05", "root=default"]}]': finished 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "c4e71d46-f1e6-4cdb-b025-41a19b086c9f"}]': finished 2026-03-09T15:27:53.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: osdmap e19: 5 total, 3 up, 5 in 2026-03-09T15:27:53.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:53.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:27:53.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:53 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:54.484 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:54 vm05 ceph-mon[49764]: from='client.? 
192.168.123.109:0/2373166553' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:27:54.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:54 vm05 ceph-mon[49764]: pgmap v40: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-09T15:27:54.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:54 vm05 ceph-mon[49764]: osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141] boot 2026-03-09T15:27:54.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:54 vm05 ceph-mon[49764]: osdmap e20: 5 total, 4 up, 5 in 2026-03-09T15:27:54.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:54 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:54.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:54 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:27:54.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:54 vm05 ceph-mon[54361]: from='client.? 192.168.123.109:0/2373166553' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:27:54.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:54 vm05 ceph-mon[54361]: pgmap v40: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-09T15:27:54.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:54 vm05 ceph-mon[54361]: osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141] boot 2026-03-09T15:27:54.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:54 vm05 ceph-mon[54361]: osdmap e20: 5 total, 4 up, 5 in 2026-03-09T15:27:54.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:54 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:54.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:54 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:27:54.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:54 vm09 ceph-mon[49358]: from='client.? 
192.168.123.109:0/2373166553' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:27:54.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:54 vm09 ceph-mon[49358]: pgmap v40: 1 pgs: 1 active+clean; 449 KiB data, 17 MiB used, 60 GiB / 60 GiB avail 2026-03-09T15:27:54.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:54 vm09 ceph-mon[49358]: osd.3 [v2:192.168.123.105:6826/2164219141,v1:192.168.123.105:6827/2164219141] boot 2026-03-09T15:27:54.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:54 vm09 ceph-mon[49358]: osdmap e20: 5 total, 4 up, 5 in 2026-03-09T15:27:54.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:54 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:27:54.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:54 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:27:55.372 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:55 vm09 ceph-mon[49358]: purged_snaps scrub starts 2026-03-09T15:27:55.372 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:55 vm09 ceph-mon[49358]: purged_snaps scrub ok 2026-03-09T15:27:55.484 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:55 vm05 ceph-mon[49764]: purged_snaps scrub starts 2026-03-09T15:27:55.484 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:55 vm05 ceph-mon[49764]: purged_snaps scrub ok 2026-03-09T15:27:55.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:55 vm05 ceph-mon[54361]: purged_snaps scrub starts 2026-03-09T15:27:55.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:55 vm05 ceph-mon[54361]: purged_snaps scrub ok 2026-03-09T15:27:56.235 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:56 vm09 ceph-mon[49358]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T15:27:56.235 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:56 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T15:27:56.235 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:56 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:56.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:56 vm05 ceph-mon[49764]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T15:27:56.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:56 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T15:27:56.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:56 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:56.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:56 vm05 ceph-mon[54361]: pgmap v42: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T15:27:56.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:56 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T15:27:56.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:56 vm05 ceph-mon[54361]: from='mgr.14152 
192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:57.469 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:57 vm09 ceph-mon[49358]: Deploying daemon osd.4 on vm09 2026-03-09T15:27:57.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:57 vm05 ceph-mon[49764]: Deploying daemon osd.4 on vm09 2026-03-09T15:27:57.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:57 vm05 ceph-mon[54361]: Deploying daemon osd.4 on vm09 2026-03-09T15:27:58.232 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:58 vm09 ceph-mon[49358]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T15:27:58.233 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:58 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:58.233 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:58 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:58.233 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:58 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:58.233 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:58 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:58.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:58 vm05 ceph-mon[49764]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T15:27:58.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:58 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:58.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:58 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:58.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:58 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:58.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:58 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:58.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:58 vm05 ceph-mon[54361]: pgmap v43: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T15:27:58.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:58 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:58.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:58 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:58.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:58 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:58.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:58 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 
2026-03-09T15:27:58.587 INFO:teuthology.orchestra.run.vm09.stdout:Created osd(s) 4 on host 'vm09' 2026-03-09T15:27:58.642 DEBUG:teuthology.orchestra.run.vm09:osd.4> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.4.service 2026-03-09T15:27:58.644 INFO:tasks.cephadm:Deploying osd.5 on vm09 with /dev/vdd... 2026-03-09T15:27:58.644 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- lvm zap /dev/vdd 2026-03-09T15:27:59.403 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T15:27:59.417 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:27:59 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4[52679]: 2026-03-09T15:27:59.334+0000 7f6872c743c0 -1 osd.4 0 log_to_monitors true 2026-03-09T15:27:59.419 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch daemon add osd vm09:/dev/vdd 2026-03-09T15:27:59.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:59.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:59.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:59.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:59.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:59.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:59.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[49764]: from='osd.4 [v2:192.168.123.109:6800/364460566,v1:192.168.123.109:6801/364460566]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T15:27:59.499 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[49764]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T15:27:59.504 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[49764]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T15:27:59.668 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:59 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:59.668 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:59 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:59.668 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:59 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:59.668 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:59 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:59.668 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:59 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:59.668 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:59 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:59.668 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:59 vm09 ceph-mon[49358]: from='osd.4 [v2:192.168.123.109:6800/364460566,v1:192.168.123.109:6801/364460566]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T15:27:59.668 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:59 vm09 ceph-mon[49358]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T15:27:59.668 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:27:59 vm09 ceph-mon[49358]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T15:27:59.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:59.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:59.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:27:59.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:27:59.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:27:59.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:27:59.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[54361]: from='osd.4 [v2:192.168.123.109:6800/364460566,v1:192.168.123.109:6801/364460566]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T15:27:59.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[54361]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T15:27:59.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:27:59 vm05 ceph-mon[54361]: pgmap v44: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T15:28:00.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T15:28:00.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: osdmap e21: 5 total, 4 up, 5 in 2026-03-09T15:28:00.812 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:00.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: from='osd.4 [v2:192.168.123.109:6800/364460566,v1:192.168.123.109:6801/364460566]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:00.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:00.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: from='client.24212 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:00.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:28:00.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:28:00.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:00.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: Detected new or changed devices on vm09 2026-03-09T15:28:00.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:00.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:00.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: Adjusting osd_memory_target on vm09 to 257.0M 2026-03-09T15:28:00.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: Unable to set osd_memory_target on vm09 to 269536460: error parsing value: Value '269536460' is below minimum 939524096 2026-03-09T15:28:00.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:00 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:00.812 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:28:00 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4[52679]: 2026-03-09T15:28:00.604+0000 7f6869677700 -1 osd.4 0 waiting for initial osdmap 2026-03-09T15:28:00.812 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:28:00 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4[52679]: 2026-03-09T15:28:00.617+0000 7f6865811700 -1 osd.4 22 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T15:28:00.985 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: osdmap e21: 5 total, 4 up, 5 in 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: from='osd.4 [v2:192.168.123.109:6800/364460566,v1:192.168.123.109:6801/364460566]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: from='client.24212 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: Detected new or changed devices on vm09 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: Adjusting osd_memory_target on vm09 to 257.0M 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: Unable to set osd_memory_target on vm09 to 269536460: error parsing value: Value '269536460' is below minimum 939524096 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: osdmap e21: 5 total, 4 up, 5 in 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 
09 15:28:00 vm05 ceph-mon[54361]: from='osd.4 [v2:192.168.123.109:6800/364460566,v1:192.168.123.109:6801/364460566]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: from='client.24212 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vdd", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: Detected new or changed devices on vm09 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: Adjusting osd_memory_target on vm09 to 257.0M 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: Unable to set osd_memory_target on vm09 to 269536460: error parsing value: Value '269536460' is below minimum 939524096 2026-03-09T15:28:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:00 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: osdmap e22: 5 total, 4 up, 5 in 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: from='client.? 
192.168.123.109:0/2782441788' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b092f491-9b09-4c77-81aa-03a6adc5b415"}]: dispatch 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: osd.4 [v2:192.168.123.109:6800/364460566,v1:192.168.123.109:6801/364460566] boot 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: from='client.? 192.168.123.109:0/2782441788' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b092f491-9b09-4c77-81aa-03a6adc5b415"}]': finished 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: osdmap e23: 6 total, 5 up, 6 in 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: from='client.? 192.168.123.109:0/679251020' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: osdmap e24: 6 total, 5 up, 6 in 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: osdmap e22: 5 total, 4 up, 5 in 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: from='client.? 192.168.123.109:0/2782441788' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b092f491-9b09-4c77-81aa-03a6adc5b415"}]: dispatch 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: osd.4 [v2:192.168.123.109:6800/364460566,v1:192.168.123.109:6801/364460566] boot 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: from='client.? 
192.168.123.109:0/2782441788' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b092f491-9b09-4c77-81aa-03a6adc5b415"}]': finished 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: osdmap e23: 6 total, 5 up, 6 in 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: from='client.? 192.168.123.109:0/679251020' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: osdmap e24: 6 total, 5 up, 6 in 2026-03-09T15:28:01.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:01 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: osdmap e22: 5 total, 4 up, 5 in 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: from='client.? 192.168.123.109:0/2782441788' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "b092f491-9b09-4c77-81aa-03a6adc5b415"}]: dispatch 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: osd.4 [v2:192.168.123.109:6800/364460566,v1:192.168.123.109:6801/364460566] boot 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: from='client.? 
192.168.123.109:0/2782441788' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "b092f491-9b09-4c77-81aa-03a6adc5b415"}]': finished 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: osdmap e23: 6 total, 5 up, 6 in 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: pgmap v48: 1 pgs: 1 active+clean; 449 KiB data, 22 MiB used, 80 GiB / 80 GiB avail 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: from='client.? 192.168.123.109:0/679251020' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: osdmap e24: 6 total, 5 up, 6 in 2026-03-09T15:28:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:01 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:03.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:02 vm05 ceph-mon[49764]: purged_snaps scrub starts 2026-03-09T15:28:03.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:02 vm05 ceph-mon[49764]: purged_snaps scrub ok 2026-03-09T15:28:03.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:02 vm05 ceph-mon[49764]: osdmap e25: 6 total, 5 up, 6 in 2026-03-09T15:28:03.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:02 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:03.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:02 vm05 ceph-mon[54361]: purged_snaps scrub starts 2026-03-09T15:28:03.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:02 vm05 ceph-mon[54361]: purged_snaps scrub ok 2026-03-09T15:28:03.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:02 vm05 ceph-mon[54361]: osdmap e25: 6 total, 5 up, 6 in 2026-03-09T15:28:03.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:02 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:03.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:02 vm09 ceph-mon[49358]: purged_snaps scrub starts 2026-03-09T15:28:03.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:02 vm09 ceph-mon[49358]: purged_snaps scrub ok 2026-03-09T15:28:03.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:02 vm09 ceph-mon[49358]: osdmap e25: 6 total, 5 up, 6 in 2026-03-09T15:28:03.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:02 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:04.204 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:03 vm09 ceph-mon[49358]: pgmap v51: 1 pgs: 1 peering; 449 KiB data, 27 MiB used, 100 GiB / 100 GiB avail 2026-03-09T15:28:04.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:03 vm05 ceph-mon[49764]: pgmap v51: 1 pgs: 1 
peering; 449 KiB data, 27 MiB used, 100 GiB / 100 GiB avail 2026-03-09T15:28:04.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:03 vm05 ceph-mon[54361]: pgmap v51: 1 pgs: 1 peering; 449 KiB data, 27 MiB used, 100 GiB / 100 GiB avail 2026-03-09T15:28:05.038 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:04 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T15:28:05.039 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:04 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:05.039 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:04 vm09 ceph-mon[49358]: Deploying daemon osd.5 on vm09 2026-03-09T15:28:05.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:04 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T15:28:05.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:04 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:05.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:04 vm05 ceph-mon[49764]: Deploying daemon osd.5 on vm09 2026-03-09T15:28:05.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:04 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T15:28:05.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:04 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:05.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:04 vm05 ceph-mon[54361]: Deploying daemon osd.5 on vm09 2026-03-09T15:28:06.090 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:05 vm09 ceph-mon[49358]: pgmap v52: 1 pgs: 1 peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-09T15:28:06.090 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:05 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:06.090 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:05 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:06.090 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:05 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:06.090 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:05 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:06.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:05 vm05 ceph-mon[49764]: pgmap v52: 1 pgs: 1 peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-09T15:28:06.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:05 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:06.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:05 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 
2026-03-09T15:28:06.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:05 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:06.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:05 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:06.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:05 vm05 ceph-mon[54361]: pgmap v52: 1 pgs: 1 peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-09T15:28:06.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:05 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:06.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:05 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:06.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:05 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:06.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:05 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:06.619 INFO:teuthology.orchestra.run.vm09.stdout:Created osd(s) 5 on host 'vm09' 2026-03-09T15:28:06.671 DEBUG:teuthology.orchestra.run.vm09:osd.5> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.5.service 2026-03-09T15:28:06.673 INFO:tasks.cephadm:Deploying osd.6 on vm09 with /dev/vdc... 
2026-03-09T15:28:06.673 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- lvm zap /dev/vdc 2026-03-09T15:28:07.796 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:28:07 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[55451]: 2026-03-09T15:28:07.412+0000 7fbc0ac043c0 -1 osd.5 0 log_to_monitors true 2026-03-09T15:28:07.796 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:07 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:07.796 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:07 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:07.796 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:07 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:07.796 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:07 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:07.796 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:07 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:07.796 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:07 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:07.796 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:07 vm09 ceph-mon[49358]: pgmap v53: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-09T15:28:07.796 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:07 vm09 ceph-mon[49358]: from='osd.5 [v2:192.168.123.109:6808/1669811496,v1:192.168.123.109:6809/1669811496]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T15:28:07.796 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:07 vm09 ceph-mon[49358]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:07.985 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[49764]: pgmap v53: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[49764]: from='osd.5 [v2:192.168.123.109:6808/1669811496,v1:192.168.123.109:6809/1669811496]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[49764]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[54361]: pgmap v53: 1 pgs: 1 active+clean; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail; 60 KiB/s, 0 objects/s recovering 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[54361]: from='osd.5 [v2:192.168.123.109:6808/1669811496,v1:192.168.123.109:6809/1669811496]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T15:28:07.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:07 vm05 ceph-mon[54361]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T15:28:08.299 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T15:28:08.316 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch daemon add osd vm09:/dev/vdc 2026-03-09T15:28:08.795 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:08 vm09 ceph-mon[49358]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T15:28:08.795 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:08 vm09 ceph-mon[49358]: osdmap e26: 6 total, 5 up, 6 in 2026-03-09T15:28:08.795 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:08 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:08.795 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:08 vm09 ceph-mon[49358]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:08.795 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:08 vm09 ceph-mon[49358]: from='osd.5 [v2:192.168.123.109:6808/1669811496,v1:192.168.123.109:6809/1669811496]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:08.795 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:08 vm09 ceph-mon[49358]: Detected new or changed devices on vm09 2026-03-09T15:28:08.795 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:08 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:08.795 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:08 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:08.795 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:08 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:08.795 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:08 vm09 ceph-mon[49358]: Adjusting osd_memory_target on vm09 to 128.5M 2026-03-09T15:28:08.795 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:08 vm09 ceph-mon[49358]: Unable to set osd_memory_target on vm09 to 134768230: error parsing value: Value '134768230' is below minimum 939524096 2026-03-09T15:28:08.795 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:08 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:08.795 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:28:08 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[55451]: 2026-03-09T15:28:08.639+0000 7fbc01607700 -1 osd.5 0 waiting for initial osdmap 2026-03-09T15:28:08.795 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:28:08 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[55451]: 2026-03-09T15:28:08.649+0000 7fbbfc79f700 -1 osd.5 27 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:28:08.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[49764]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[49764]: osdmap e26: 6 total, 5 up, 6 in 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[49764]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[49764]: from='osd.5 [v2:192.168.123.109:6808/1669811496,v1:192.168.123.109:6809/1669811496]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:08.985 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[49764]: Detected new or changed devices on vm09 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[49764]: Adjusting osd_memory_target on vm09 to 128.5M 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[49764]: Unable to set osd_memory_target on vm09 to 134768230: error parsing value: Value '134768230' is below minimum 939524096 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[54361]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[54361]: osdmap e26: 6 total, 5 up, 6 in 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[54361]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[54361]: from='osd.5 [v2:192.168.123.109:6808/1669811496,v1:192.168.123.109:6809/1669811496]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[54361]: Detected new or changed devices on vm09 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[54361]: Adjusting osd_memory_target on vm09 to 128.5M 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[54361]: Unable to set osd_memory_target on vm09 to 134768230: error parsing 
value: Value '134768230' is below minimum 939524096 2026-03-09T15:28:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:08 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: osdmap e27: 6 total, 5 up, 6 in 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: from='client.24239 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: pgmap v56: 1 pgs: 1 peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: from='client.? 192.168.123.109:0/108511842' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8af33735-b39b-4b53-952c-68e544ffc047"}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8af33735-b39b-4b53-952c-68e544ffc047"}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: osd.5 [v2:192.168.123.109:6808/1669811496,v1:192.168.123.109:6809/1669811496] boot 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8af33735-b39b-4b53-952c-68e544ffc047"}]': finished 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: osdmap e28: 7 total, 6 up, 7 in 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: osdmap e27: 6 total, 5 up, 6 in 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: from='client.24239 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: pgmap v56: 1 pgs: 1 peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: from='client.? 192.168.123.109:0/108511842' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8af33735-b39b-4b53-952c-68e544ffc047"}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8af33735-b39b-4b53-952c-68e544ffc047"}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: osd.5 [v2:192.168.123.109:6808/1669811496,v1:192.168.123.109:6809/1669811496] boot 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8af33735-b39b-4b53-952c-68e544ffc047"}]': finished 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: osdmap e28: 7 total, 6 up, 7 in 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:09 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: osdmap e27: 6 total, 5 up, 6 in 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: from='client.24239 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vdc", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: pgmap v56: 1 pgs: 1 peering; 449 KiB data, 28 MiB used, 100 GiB / 100 GiB avail 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: from='client.? 192.168.123.109:0/108511842' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8af33735-b39b-4b53-952c-68e544ffc047"}]: dispatch 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "8af33735-b39b-4b53-952c-68e544ffc047"}]: dispatch 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: osd.5 [v2:192.168.123.109:6808/1669811496,v1:192.168.123.109:6809/1669811496] boot 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "8af33735-b39b-4b53-952c-68e544ffc047"}]': finished 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: osdmap e28: 7 total, 6 up, 7 in 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:09 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:10.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:10 vm05 ceph-mon[49764]: purged_snaps scrub starts 2026-03-09T15:28:10.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:10 vm05 ceph-mon[49764]: purged_snaps scrub ok 2026-03-09T15:28:10.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:10 vm05 ceph-mon[49764]: from='client.? 192.168.123.109:0/2995717468' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:28:10.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:10 vm05 ceph-mon[49764]: osdmap e29: 7 total, 6 up, 7 in 2026-03-09T15:28:10.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:10 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:10.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:10 vm05 ceph-mon[54361]: purged_snaps scrub starts 2026-03-09T15:28:10.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:10 vm05 ceph-mon[54361]: purged_snaps scrub ok 2026-03-09T15:28:10.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:10 vm05 ceph-mon[54361]: from='client.? 192.168.123.109:0/2995717468' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:28:10.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:10 vm05 ceph-mon[54361]: osdmap e29: 7 total, 6 up, 7 in 2026-03-09T15:28:10.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:10 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:11.060 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:10 vm09 ceph-mon[49358]: purged_snaps scrub starts 2026-03-09T15:28:11.060 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:10 vm09 ceph-mon[49358]: purged_snaps scrub ok 2026-03-09T15:28:11.060 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:10 vm09 ceph-mon[49358]: from='client.? 
192.168.123.109:0/2995717468' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:28:11.060 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:10 vm09 ceph-mon[49358]: osdmap e29: 7 total, 6 up, 7 in 2026-03-09T15:28:11.060 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:10 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:11.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:11 vm05 ceph-mon[49764]: pgmap v59: 1 pgs: 1 peering; 449 KiB data, 33 MiB used, 120 GiB / 120 GiB avail 2026-03-09T15:28:11.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:11 vm05 ceph-mon[49764]: osdmap e30: 7 total, 6 up, 7 in 2026-03-09T15:28:11.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:11 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:11.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:11 vm05 ceph-mon[54361]: pgmap v59: 1 pgs: 1 peering; 449 KiB data, 33 MiB used, 120 GiB / 120 GiB avail 2026-03-09T15:28:11.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:11 vm05 ceph-mon[54361]: osdmap e30: 7 total, 6 up, 7 in 2026-03-09T15:28:11.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:11 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:12.060 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:11 vm09 ceph-mon[49358]: pgmap v59: 1 pgs: 1 peering; 449 KiB data, 33 MiB used, 120 GiB / 120 GiB avail 2026-03-09T15:28:12.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:11 vm09 ceph-mon[49358]: osdmap e30: 7 total, 6 up, 7 in 2026-03-09T15:28:12.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:11 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:13.200 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T15:28:13.200 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:13 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:13.484 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T15:28:13.484 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:13 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:13.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:13 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T15:28:13.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:13 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:14.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:14 vm09 ceph-mon[49358]: Deploying daemon osd.6 on vm09 2026-03-09T15:28:14.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 
09 15:28:14 vm09 ceph-mon[49358]: pgmap v61: 1 pgs: 1 peering; 449 KiB data, 33 MiB used, 120 GiB / 120 GiB avail 2026-03-09T15:28:14.484 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:14 vm05 ceph-mon[49764]: Deploying daemon osd.6 on vm09 2026-03-09T15:28:14.484 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:14 vm05 ceph-mon[49764]: pgmap v61: 1 pgs: 1 peering; 449 KiB data, 33 MiB used, 120 GiB / 120 GiB avail 2026-03-09T15:28:14.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:14 vm05 ceph-mon[54361]: Deploying daemon osd.6 on vm09 2026-03-09T15:28:14.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:14 vm05 ceph-mon[54361]: pgmap v61: 1 pgs: 1 peering; 449 KiB data, 33 MiB used, 120 GiB / 120 GiB avail 2026-03-09T15:28:15.053 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:15 vm09 ceph-mon[49358]: osdmap e31: 7 total, 6 up, 7 in 2026-03-09T15:28:15.053 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:15.053 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:15.053 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:15.053 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:15.053 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:15 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:15 vm05 ceph-mon[49764]: osdmap e31: 7 total, 6 up, 7 in 2026-03-09T15:28:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:15 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:15.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:15 vm05 ceph-mon[54361]: osdmap e31: 7 total, 6 up, 7 in 2026-03-09T15:28:15.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:15 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:15.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:15 vm05 ceph-mon[54361]: 
from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:15.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:15 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:15.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:15 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:15.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:15 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:15.760 INFO:teuthology.orchestra.run.vm09.stdout:Created osd(s) 6 on host 'vm09' 2026-03-09T15:28:15.871 DEBUG:teuthology.orchestra.run.vm09:osd.6> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.6.service 2026-03-09T15:28:15.876 INFO:tasks.cephadm:Deploying osd.7 on vm09 with /dev/vdb... 2026-03-09T15:28:15.876 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 ceph-volume -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- lvm zap /dev/vdb 2026-03-09T15:28:16.198 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:16 vm09 ceph-mon[49358]: osdmap e32: 7 total, 6 up, 7 in 2026-03-09T15:28:16.198 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:16 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:16.198 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:16 vm09 ceph-mon[49358]: pgmap v64: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 14 KiB/s, 0 objects/s recovering 2026-03-09T15:28:16.198 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:16 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:16.198 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:16 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:16.198 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:16 vm09 ceph-mon[49358]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T15:28:16.198 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:16 vm09 ceph-mon[49358]: from='osd.6 [v2:192.168.123.109:6816/4136949191,v1:192.168.123.109:6817/4136949191]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T15:28:16.198 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:28:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[58192]: 2026-03-09T15:28:15.973+0000 7fb75e6413c0 -1 osd.6 0 log_to_monitors true 2026-03-09T15:28:16.484 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[49764]: osdmap e32: 7 total, 6 up, 7 in 2026-03-09T15:28:16.484 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:16.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[49764]: pgmap v64: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 14 KiB/s, 0 objects/s recovering 2026-03-09T15:28:16.485 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:16.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:16.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[49764]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T15:28:16.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[49764]: from='osd.6 [v2:192.168.123.109:6816/4136949191,v1:192.168.123.109:6817/4136949191]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T15:28:16.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[54361]: osdmap e32: 7 total, 6 up, 7 in 2026-03-09T15:28:16.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:16.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[54361]: pgmap v64: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 14 KiB/s, 0 objects/s recovering 2026-03-09T15:28:16.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:16.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:16.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[54361]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T15:28:16.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:16 vm05 ceph-mon[54361]: from='osd.6 [v2:192.168.123.109:6816/4136949191,v1:192.168.123.109:6817/4136949191]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T15:28:16.734 INFO:teuthology.orchestra.run.vm09.stdout: 2026-03-09T15:28:16.752 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch daemon add osd vm09:/dev/vdb 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: osdmap e33: 7 total, 6 up, 7 in 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='osd.6 
[v2:192.168.123.109:6816/4136949191,v1:192.168.123.109:6817/4136949191]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: pgmap v66: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 12 KiB/s, 0 objects/s recovering 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='client.24247 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-09T15:28:18.235 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: osdmap e33: 7 total, 6 up, 7 in 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='osd.6 [v2:192.168.123.109:6816/4136949191,v1:192.168.123.109:6817/4136949191]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: pgmap v66: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 12 KiB/s, 0 objects/s recovering 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='client.24247 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:18.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:28:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:28:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config 
generate-minimal-conf"}]: dispatch 2026-03-09T15:28:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:17 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:18.311 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:28:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[58192]: 2026-03-09T15:28:17.905+0000 7fb756847700 -1 osd.6 0 waiting for initial osdmap 2026-03-09T15:28:18.311 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:28:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[58192]: 2026-03-09T15:28:17.916+0000 7fb7519df700 -1 osd.6 34 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: osdmap e33: 7 total, 6 up, 7 in 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='osd.6 [v2:192.168.123.109:6816/4136949191,v1:192.168.123.109:6817/4136949191]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: pgmap v66: 1 pgs: 1 active+recovering; 449 KiB data, 34 MiB used, 120 GiB / 120 GiB avail; 12 KiB/s, 0 objects/s recovering 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='client.24247 -' entity='client.admin' cmd=[{"prefix": "orch daemon add osd", "svc_arg": "vm09:/dev/vdb", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd tree", "states": ["destroyed"], "format": "json"}]: dispatch 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.bootstrap-osd"}]: dispatch 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='mgr.14152 
192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:18.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:17 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: Detected new or changed devices on vm09 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: Adjusting osd_memory_target on vm09 to 87739k 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: Unable to set osd_memory_target on vm09 to 89845486: error parsing value: Value '89845486' is below minimum 939524096 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: osdmap e34: 7 total, 6 up, 7 in 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: from='client.? 192.168.123.109:0/376498201' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "74ebc967-e985-4f5a-a0f8-c8493e041bc8"}]: dispatch 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: from='client.? 
' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "74ebc967-e985-4f5a-a0f8-c8493e041bc8"}]: dispatch 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: osd.6 [v2:192.168.123.109:6816/4136949191,v1:192.168.123.109:6817/4136949191] boot 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "74ebc967-e985-4f5a-a0f8-c8493e041bc8"}]': finished 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: osdmap e35: 8 total, 7 up, 8 in 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[49764]: from='client.? 192.168.123.109:0/74599333' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: Detected new or changed devices on vm09 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: Adjusting osd_memory_target on vm09 to 87739k 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: Unable to set osd_memory_target on vm09 to 89845486: error parsing value: Value '89845486' is below minimum 939524096 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: osdmap e34: 7 total, 6 up, 7 in 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: from='client.? 192.168.123.109:0/376498201' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "74ebc967-e985-4f5a-a0f8-c8493e041bc8"}]: dispatch 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "74ebc967-e985-4f5a-a0f8-c8493e041bc8"}]: dispatch 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: osd.6 [v2:192.168.123.109:6816/4136949191,v1:192.168.123.109:6817/4136949191] boot 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "74ebc967-e985-4f5a-a0f8-c8493e041bc8"}]': finished 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: osdmap e35: 8 total, 7 up, 8 in 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:18 vm05 ceph-mon[54361]: from='client.? 192.168.123.109:0/74599333' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: Detected new or changed devices on vm09 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: Adjusting osd_memory_target on vm09 to 87739k 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: Unable to set osd_memory_target on vm09 to 89845486: error parsing value: Value '89845486' is below minimum 939524096 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: osdmap e34: 7 total, 6 up, 7 in 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: from='client.? 192.168.123.109:0/376498201' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "74ebc967-e985-4f5a-a0f8-c8493e041bc8"}]: dispatch 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: from='client.? ' entity='client.bootstrap-osd' cmd=[{"prefix": "osd new", "uuid": "74ebc967-e985-4f5a-a0f8-c8493e041bc8"}]: dispatch 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: osd.6 [v2:192.168.123.109:6816/4136949191,v1:192.168.123.109:6817/4136949191] boot 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "74ebc967-e985-4f5a-a0f8-c8493e041bc8"}]': finished 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: osdmap e35: 8 total, 7 up, 8 in 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:18 vm09 ceph-mon[49358]: from='client.? 192.168.123.109:0/74599333' entity='client.bootstrap-osd' cmd=[{"prefix": "mon getmap"}]: dispatch 2026-03-09T15:28:20.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:19 vm05 ceph-mon[49764]: purged_snaps scrub starts 2026-03-09T15:28:20.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:19 vm05 ceph-mon[49764]: purged_snaps scrub ok 2026-03-09T15:28:20.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:19 vm05 ceph-mon[49764]: osdmap e36: 8 total, 7 up, 8 in 2026-03-09T15:28:20.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:19 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:20.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:19 vm05 ceph-mon[49764]: pgmap v70: 1 pgs: 1 remapped+peering; 449 KiB data, 39 MiB used, 140 GiB / 140 GiB avail 2026-03-09T15:28:20.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:19 vm05 ceph-mon[54361]: purged_snaps scrub starts 2026-03-09T15:28:20.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:19 vm05 ceph-mon[54361]: purged_snaps scrub ok 2026-03-09T15:28:20.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:19 vm05 ceph-mon[54361]: osdmap e36: 8 total, 7 up, 8 in 2026-03-09T15:28:20.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:19 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:20.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:19 vm05 ceph-mon[54361]: pgmap v70: 1 pgs: 1 remapped+peering; 449 KiB data, 39 MiB used, 140 GiB / 140 GiB avail 2026-03-09T15:28:20.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:19 vm09 ceph-mon[49358]: purged_snaps scrub starts 2026-03-09T15:28:20.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:19 vm09 ceph-mon[49358]: purged_snaps scrub ok 2026-03-09T15:28:20.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:19 vm09 ceph-mon[49358]: osdmap e36: 8 total, 7 up, 8 in 2026-03-09T15:28:20.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:19 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:20.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:19 vm09 ceph-mon[49358]: pgmap v70: 1 pgs: 1 remapped+peering; 449 KiB data, 39 MiB used, 140 GiB / 140 GiB avail 2026-03-09T15:28:21.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:21 vm05 ceph-mon[49764]: osdmap e37: 8 total, 7 up, 8 in 2026-03-09T15:28:21.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:21 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 
cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:21.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:21 vm05 ceph-mon[54361]: osdmap e37: 8 total, 7 up, 8 in 2026-03-09T15:28:21.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:21 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:21.560 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:21 vm09 ceph-mon[49358]: osdmap e37: 8 total, 7 up, 8 in 2026-03-09T15:28:21.560 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:21 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:22.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:22 vm05 ceph-mon[49764]: pgmap v72: 1 pgs: 1 remapped+peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T15:28:22.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:22 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T15:28:22.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:22 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:22.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:22 vm05 ceph-mon[54361]: pgmap v72: 1 pgs: 1 remapped+peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T15:28:22.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:22 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T15:28:22.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:22 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:22.495 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:22 vm09 ceph-mon[49358]: pgmap v72: 1 pgs: 1 remapped+peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T15:28:22.495 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:22 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T15:28:22.495 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:22 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:23.409 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:23 vm09 ceph-mon[49358]: Deploying daemon osd.7 on vm09 2026-03-09T15:28:23.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:23 vm05 ceph-mon[49764]: Deploying daemon osd.7 on vm09 2026-03-09T15:28:23.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:23 vm05 ceph-mon[54361]: Deploying daemon osd.7 on vm09 2026-03-09T15:28:24.245 INFO:teuthology.orchestra.run.vm09.stdout:Created osd(s) 7 on host 'vm09' 2026-03-09T15:28:24.297 DEBUG:teuthology.orchestra.run.vm09:osd.7> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.7.service 2026-03-09T15:28:24.298 INFO:tasks.cephadm:Waiting for 8 OSDs to come up... 
2026-03-09T15:28:24.299 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd stat -f json 2026-03-09T15:28:24.482 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:24 vm09 ceph-mon[49358]: pgmap v73: 1 pgs: 1 remapped+peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T15:28:24.482 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:24 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:24.482 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:24 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:24.482 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:24 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:24.482 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:24 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:24.482 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:24 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:24.482 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:24 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:24.482 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:24 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:24.482 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:24 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:24.482 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:24 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:24.482 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:24 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[49764]: pgmap v73: 1 pgs: 1 remapped+peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:24 vm05 
ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[54361]: pgmap v73: 1 pgs: 1 remapped+peering; 449 KiB data, 40 MiB used, 140 GiB / 140 GiB avail 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:24.583 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:24.584 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:24 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:24.811 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:28:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[60955]: 2026-03-09T15:28:24.481+0000 7fd605d473c0 -1 osd.7 0 log_to_monitors true 2026-03-09T15:28:24.821 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:28:24.897 
INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":37,"num_osds":8,"num_up_osds":7,"osd_up_since":1773070098,"num_in_osds":8,"osd_in_since":1773070098,"num_remapped_pgs":0} 2026-03-09T15:28:25.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:25 vm05 ceph-mon[49764]: from='osd.7 [v2:192.168.123.109:6824/3774114446,v1:192.168.123.109:6825/3774114446]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T15:28:25.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:25 vm05 ceph-mon[49764]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T15:28:25.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:25 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2069901976' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T15:28:25.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:25 vm05 ceph-mon[54361]: from='osd.7 [v2:192.168.123.109:6824/3774114446,v1:192.168.123.109:6825/3774114446]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T15:28:25.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:25 vm05 ceph-mon[54361]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T15:28:25.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:25 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2069901976' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T15:28:25.810 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:25 vm09 ceph-mon[49358]: from='osd.7 [v2:192.168.123.109:6824/3774114446,v1:192.168.123.109:6825/3774114446]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T15:28:25.810 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:25 vm09 ceph-mon[49358]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T15:28:25.810 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:25 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/2069901976' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T15:28:25.897 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd stat -f json 2026-03-09T15:28:26.356 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:28:26.438 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[49764]: pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail; 45 KiB/s, 0 objects/s recovering 2026-03-09T15:28:26.438 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[49764]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-09T15:28:26.438 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[49764]: osdmap e38: 8 total, 7 up, 8 in 2026-03-09T15:28:26.438 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:26.438 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[49764]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:26.438 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[49764]: from='osd.7 [v2:192.168.123.109:6824/3774114446,v1:192.168.123.109:6825/3774114446]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:26.438 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:26.438 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:26.438 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:26.438 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:26.438 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:26.438 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:26.438 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/1549065037' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T15:28:26.439 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[54361]: pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail; 45 KiB/s, 0 objects/s recovering 2026-03-09T15:28:26.439 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[54361]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-09T15:28:26.439 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[54361]: osdmap e38: 8 total, 7 up, 8 in 2026-03-09T15:28:26.439 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:26.439 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[54361]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:26.439 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[54361]: from='osd.7 [v2:192.168.123.109:6824/3774114446,v1:192.168.123.109:6825/3774114446]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:26.439 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:26.439 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:26.439 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:26.439 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:26.439 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:26.439 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:26.439 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:26 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/1549065037' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T15:28:26.439 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":38,"num_osds":8,"num_up_osds":7,"osd_up_since":1773070098,"num_in_osds":8,"osd_in_since":1773070098,"num_remapped_pgs":0} 2026-03-09T15:28:26.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:26 vm09 ceph-mon[49358]: pgmap v74: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail; 45 KiB/s, 0 objects/s recovering 2026-03-09T15:28:26.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:26 vm09 ceph-mon[49358]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-09T15:28:26.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:26 vm09 ceph-mon[49358]: osdmap e38: 8 total, 7 up, 8 in 2026-03-09T15:28:26.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:26 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:26.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:26 vm09 ceph-mon[49358]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:26.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:26 vm09 ceph-mon[49358]: from='osd.7 [v2:192.168.123.109:6824/3774114446,v1:192.168.123.109:6825/3774114446]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:28:26.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:26 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:26.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:26 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:26.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:26 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:26.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:26 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:26.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:26 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:26.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:26 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:26.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:26 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/1549065037' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T15:28:26.811 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:28:26 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[60955]: 2026-03-09T15:28:26.396+0000 7fd5fc74a700 -1 osd.7 0 waiting for initial osdmap 2026-03-09T15:28:26.811 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:28:26 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[60955]: 2026-03-09T15:28:26.406+0000 7fd5f88e4700 -1 osd.7 39 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:28:27.440 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd stat -f json 2026-03-09T15:28:27.647 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[49764]: Detected new or changed devices on vm09 2026-03-09T15:28:27.648 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[49764]: Adjusting osd_memory_target on vm09 to 65804k 2026-03-09T15:28:27.648 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[49764]: Unable to set osd_memory_target on vm09 to 67384115: error parsing value: Value '67384115' is below minimum 939524096 2026-03-09T15:28:27.648 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[49764]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-09T15:28:27.648 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[49764]: osdmap e39: 8 total, 7 up, 8 in 2026-03-09T15:28:27.648 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:27.648 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:27.648 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[49764]: osd.7 [v2:192.168.123.109:6824/3774114446,v1:192.168.123.109:6825/3774114446] boot 2026-03-09T15:28:27.648 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[49764]: osdmap e40: 8 total, 8 up, 8 in 2026-03-09T15:28:27.648 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:27.649 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[54361]: Detected new or changed devices on vm09 2026-03-09T15:28:27.649 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[54361]: Adjusting osd_memory_target on vm09 to 65804k 2026-03-09T15:28:27.649 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[54361]: Unable to set osd_memory_target on vm09 to 67384115: error parsing value: Value '67384115' is below minimum 939524096 2026-03-09T15:28:27.649 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[54361]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-09T15:28:27.649 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[54361]: osdmap e39: 8 total, 7 up, 8 in 2026-03-09T15:28:27.649 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:27.649 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:27.649 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[54361]: osd.7 [v2:192.168.123.109:6824/3774114446,v1:192.168.123.109:6825/3774114446] boot 2026-03-09T15:28:27.649 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[54361]: osdmap e40: 8 total, 8 up, 8 in 2026-03-09T15:28:27.649 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:27 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:27.810 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:27 vm09 ceph-mon[49358]: Detected new or changed devices on vm09 2026-03-09T15:28:27.810 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:27 vm09 ceph-mon[49358]: Adjusting osd_memory_target on vm09 to 65804k 2026-03-09T15:28:27.810 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:27 vm09 ceph-mon[49358]: Unable to set osd_memory_target on vm09 to 67384115: error parsing value: Value '67384115' is below minimum 939524096 2026-03-09T15:28:27.810 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:27 vm09 ceph-mon[49358]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished 2026-03-09T15:28:27.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:27 vm09 ceph-mon[49358]: osdmap e39: 8 total, 7 up, 8 in 2026-03-09T15:28:27.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:27 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:27.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:27 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:27.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:27 vm09 ceph-mon[49358]: osd.7 [v2:192.168.123.109:6824/3774114446,v1:192.168.123.109:6825/3774114446] boot 2026-03-09T15:28:27.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:27 vm09 ceph-mon[49358]: osdmap e40: 8 total, 8 up, 8 in 2026-03-09T15:28:27.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:27 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:27.891 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:28:27.961 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":41,"num_osds":8,"num_up_osds":8,"osd_up_since":1773070106,"num_in_osds":8,"osd_in_since":1773070098,"num_remapped_pgs":0} 2026-03-09T15:28:27.961 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd dump --format=json 2026-03-09T15:28:28.116 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 
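The repeated "Unable to set osd_memory_target" warnings just above are benign on these small VPS nodes: cephadm's memory autotuner splits the host's autotune-able memory across the daemons on vm09, the per-OSD share shrinks as each new OSD is deployed (89845486 bytes with osd.6, 67384115 bytes once osd.7 joins), and both values are under the hard minimum the monitor accepts for osd_memory_target (939524096 bytes, i.e. 896 MiB), so the autotuner's `config set` is rejected. A rough illustration, assuming the check is simply a comparison of the computed share against that minimum:

    # Values taken directly from the warnings logged above.
    OSD_MEMORY_TARGET_MIN = 939_524_096  # 896 MiB, quoted in the error message

    for proposed in (89_845_486, 67_384_115):
        ok = proposed >= OSD_MEMORY_TARGET_MIN
        print(f"osd_memory_target={proposed}: "
              f"{'accepted' if ok else 'rejected (below minimum)'}")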
2026-03-09T15:28:28.453 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:28:28.453 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":41,"fsid":"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea","created":"2026-03-09T15:26:25.355245+0000","modified":"2026-03-09T15:28:27.563005+0000","last_up_change":"2026-03-09T15:28:26.560188+0000","last_in_change":"2026-03-09T15:28:18.200345+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-09T15:27:45.567534+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"17","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"04a1f096-2671-4227-83a4-258146ba498d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":40,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6802","nonce":1831508895},{"type":"v1","addr":"192.168.123.105:6803","nonce":1831508895}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6804","nonce":1831508895},{"type":"v1","addr":"192.168.123.105:6805","nonce":1831508895}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6808","nonce":1831508895},{"type":"v1","addr":"192.168.123.105:6809","nonce":1831508895}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6806","nonce":1831508895},{"type":"v1","addr":"192.168.123.105:6807","nonce":1831508895}]},"public_addr":"192.168.123.105:6803/1831508895","cluster_addr":"192.168.123.105:6805/1831508895","heartbeat_back_addr":"192.168.123.105:6809/1831508895","heartbeat_front_addr":"192.168.123.105:6807/1831508895","state":["exists","up"]},{"osd":1,"uuid":"a281303c-8662-4f54-8846-33be08391553","up":1,"in":1,"weight":1,"primary_affinit
y":1,"last_clean_begin":0,"last_clean_end":0,"up_from":11,"up_thru":24,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6810","nonce":3783980181},{"type":"v1","addr":"192.168.123.105:6811","nonce":3783980181}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6812","nonce":3783980181},{"type":"v1","addr":"192.168.123.105:6813","nonce":3783980181}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6816","nonce":3783980181},{"type":"v1","addr":"192.168.123.105:6817","nonce":3783980181}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6814","nonce":3783980181},{"type":"v1","addr":"192.168.123.105:6815","nonce":3783980181}]},"public_addr":"192.168.123.105:6811/3783980181","cluster_addr":"192.168.123.105:6813/3783980181","heartbeat_back_addr":"192.168.123.105:6817/3783980181","heartbeat_front_addr":"192.168.123.105:6815/3783980181","state":["exists","up"]},{"osd":2,"uuid":"3556c187-377e-47cc-8f72-be4edaa111a4","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":14,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6818","nonce":2261056004},{"type":"v1","addr":"192.168.123.105:6819","nonce":2261056004}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6820","nonce":2261056004},{"type":"v1","addr":"192.168.123.105:6821","nonce":2261056004}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6824","nonce":2261056004},{"type":"v1","addr":"192.168.123.105:6825","nonce":2261056004}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6822","nonce":2261056004},{"type":"v1","addr":"192.168.123.105:6823","nonce":2261056004}]},"public_addr":"192.168.123.105:6819/2261056004","cluster_addr":"192.168.123.105:6821/2261056004","heartbeat_back_addr":"192.168.123.105:6825/2261056004","heartbeat_front_addr":"192.168.123.105:6823/2261056004","state":["exists","up"]},{"osd":3,"uuid":"fe8044ac-8ad4-4057-a7f5-e8e77db769c3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6826","nonce":2164219141},{"type":"v1","addr":"192.168.123.105:6827","nonce":2164219141}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6828","nonce":2164219141},{"type":"v1","addr":"192.168.123.105:6829","nonce":2164219141}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6832","nonce":2164219141},{"type":"v1","addr":"192.168.123.105:6833","nonce":2164219141}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6830","nonce":2164219141},{"type":"v1","addr":"192.168.123.105:6831","nonce":2164219141}]},"public_addr":"192.168.123.105:6827/2164219141","cluster_addr":"192.168.123.105:6829/2164219141","heartbeat_back_addr":"192.168.123.105:6833/2164219141","heartbeat_front_addr":"192.168.123.105:6831/2164219141","state":["exists","up"]},{"osd":4,"uuid":"c4e71d46-f1e6-4cdb-b025-41a19b086c9f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6800","nonce":364460566},{"type":"v1","addr":"192.168.123.109:6801","nonce":364460566}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6802","nonce":364460566},{"type":"v1","addr":"192.168
.123.109:6803","nonce":364460566}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6806","nonce":364460566},{"type":"v1","addr":"192.168.123.109:6807","nonce":364460566}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6804","nonce":364460566},{"type":"v1","addr":"192.168.123.109:6805","nonce":364460566}]},"public_addr":"192.168.123.109:6801/364460566","cluster_addr":"192.168.123.109:6803/364460566","heartbeat_back_addr":"192.168.123.109:6807/364460566","heartbeat_front_addr":"192.168.123.109:6805/364460566","state":["exists","up"]},{"osd":5,"uuid":"b092f491-9b09-4c77-81aa-03a6adc5b415","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":29,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6808","nonce":1669811496},{"type":"v1","addr":"192.168.123.109:6809","nonce":1669811496}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6810","nonce":1669811496},{"type":"v1","addr":"192.168.123.109:6811","nonce":1669811496}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6814","nonce":1669811496},{"type":"v1","addr":"192.168.123.109:6815","nonce":1669811496}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6812","nonce":1669811496},{"type":"v1","addr":"192.168.123.109:6813","nonce":1669811496}]},"public_addr":"192.168.123.109:6809/1669811496","cluster_addr":"192.168.123.109:6811/1669811496","heartbeat_back_addr":"192.168.123.109:6815/1669811496","heartbeat_front_addr":"192.168.123.109:6813/1669811496","state":["exists","up"]},{"osd":6,"uuid":"8af33735-b39b-4b53-952c-68e544ffc047","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":35,"up_thru":36,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6816","nonce":4136949191},{"type":"v1","addr":"192.168.123.109:6817","nonce":4136949191}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6818","nonce":4136949191},{"type":"v1","addr":"192.168.123.109:6819","nonce":4136949191}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6822","nonce":4136949191},{"type":"v1","addr":"192.168.123.109:6823","nonce":4136949191}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6820","nonce":4136949191},{"type":"v1","addr":"192.168.123.109:6821","nonce":4136949191}]},"public_addr":"192.168.123.109:6817/4136949191","cluster_addr":"192.168.123.109:6819/4136949191","heartbeat_back_addr":"192.168.123.109:6823/4136949191","heartbeat_front_addr":"192.168.123.109:6821/4136949191","state":["exists","up"]},{"osd":7,"uuid":"74ebc967-e985-4f5a-a0f8-c8493e041bc8","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":40,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6824","nonce":3774114446},{"type":"v1","addr":"192.168.123.109:6825","nonce":3774114446}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6826","nonce":3774114446},{"type":"v1","addr":"192.168.123.109:6827","nonce":3774114446}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6830","nonce":3774114446},{"type":"v1","addr":"192.168.123.109:6831","nonce":3774114446}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6828","nonce":3774114446},{"type":"v1","addr":"192.168.123.109:6829","nonce":3774114446}]},"public_a
ddr":"192.168.123.109:6825/3774114446","cluster_addr":"192.168.123.109:6827/3774114446","heartbeat_back_addr":"192.168.123.109:6831/3774114446","heartbeat_front_addr":"192.168.123.109:6829/3774114446","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:27:26.926469+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:27:35.454112+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:27:43.615576+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:27:52.546989+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:28:00.315498+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:28:08.431556+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:28:16.976500+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:28:25.450762+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.105:6801/610584152":"2026-03-10T15:26:51.284350+0000","192.168.123.105:0/1659984683":"2026-03-10T15:26:51.284350+0000","192.168.123.105:0/3264305837":"2026-03-10T15:26:40.537100+0000","192.168.123.105:0/649368777":"2026-03-10T15:26:51.284350+0000","192.168.123.105:0/1527221936":"2026-03-10T15:26:40.537100+0000","192.168.123.105:0/2276953556":"2026-03-10T15:26:40.537100+0000","192.168.123.105:0/814043816":"2026-03-10T15:26:51.284350+0000","192.168.123.105:6800/610584152":"2026-03-10T15:26:51.284350+0000","192.168.123.105:6800/1499216893":"2026-03-10T15:26:40.537100+0000","192.168.123.105:6801/1499216893":"2026-03-10T15:26:40.537100+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-09T15:28:28.453 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:28 vm05 ceph-mon[49764]: purged_snaps scrub starts 2026-03-09T15:28:28.453 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:28 vm05 ceph-mon[49764]: purged_snaps scrub ok 2026-03-09T15:28:28.453 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:28 vm05 ceph-mon[49764]: pgmap v78: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T15:28:28.453 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:28 vm05 ceph-mon[49764]: osdmap e41: 8 total, 8 up, 8 in 
2026-03-09T15:28:28.453 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:28 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3423228820' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T15:28:28.455 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:28 vm05 ceph-mon[54361]: purged_snaps scrub starts 2026-03-09T15:28:28.455 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:28 vm05 ceph-mon[54361]: purged_snaps scrub ok 2026-03-09T15:28:28.455 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:28 vm05 ceph-mon[54361]: pgmap v78: 1 pgs: 1 active+clean; 449 KiB data, 41 MiB used, 140 GiB / 140 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T15:28:28.455 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:28 vm05 ceph-mon[54361]: osdmap e41: 8 total, 8 up, 8 in 2026-03-09T15:28:28.455 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:28 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3423228820' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T15:28:28.501 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-03-09T15:27:45.567534+0000', 'flags': 1, 'flags_names': 'hashpspool', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '17', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}}] 2026-03-09T15:28:28.501 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd pool get .mgr pg_num 2026-03-09T15:28:28.658 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:28.810 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:28 vm09 ceph-mon[49358]: purged_snaps scrub starts 2026-03-09T15:28:28.810 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:28 vm09 ceph-mon[49358]: purged_snaps scrub ok 2026-03-09T15:28:28.810 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:28 vm09 ceph-mon[49358]: pgmap v78: 1 pgs: 1 active+clean; 449 KiB 
data, 41 MiB used, 140 GiB / 140 GiB avail; 65 KiB/s, 0 objects/s recovering 2026-03-09T15:28:28.810 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:28 vm09 ceph-mon[49358]: osdmap e41: 8 total, 8 up, 8 in 2026-03-09T15:28:28.810 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:28 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3423228820' entity='client.admin' cmd=[{"prefix": "osd stat", "format": "json"}]: dispatch 2026-03-09T15:28:28.986 INFO:teuthology.orchestra.run.vm05.stdout:pg_num: 1 2026-03-09T15:28:29.056 INFO:tasks.cephadm:Adding prometheus.a on vm09 2026-03-09T15:28:29.056 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch apply prometheus '1;vm09=a' 2026-03-09T15:28:29.544 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled prometheus update... 2026-03-09T15:28:29.599 DEBUG:teuthology.orchestra.run.vm09:prometheus.a> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@prometheus.a.service 2026-03-09T15:28:29.601 INFO:tasks.cephadm:Adding node-exporter.a on vm05 2026-03-09T15:28:29.601 INFO:tasks.cephadm:Adding node-exporter.b on vm09 2026-03-09T15:28:29.601 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch apply node-exporter '2;vm05=a;vm09=b' 2026-03-09T15:28:29.601 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:29 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1106115764' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T15:28:29.601 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:29 vm09 ceph-mon[49358]: osdmap e42: 8 total, 8 up, 8 in 2026-03-09T15:28:29.601 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:29 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/567445977' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-09T15:28:29.734 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:29 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1106115764' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T15:28:29.734 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:29 vm05 ceph-mon[49764]: osdmap e42: 8 total, 8 up, 8 in 2026-03-09T15:28:29.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:29 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/567445977' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-09T15:28:29.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:29 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1106115764' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T15:28:29.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:29 vm05 ceph-mon[54361]: osdmap e42: 8 total, 8 up, 8 in 2026-03-09T15:28:29.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:29 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/567445977' entity='client.admin' cmd=[{"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"}]: dispatch 2026-03-09T15:28:30.100 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled node-exporter update... 
2026-03-09T15:28:30.143 DEBUG:teuthology.orchestra.run.vm05:node-exporter.a> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.a.service 2026-03-09T15:28:30.144 DEBUG:teuthology.orchestra.run.vm09:node-exporter.b> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.b.service 2026-03-09T15:28:30.146 INFO:tasks.cephadm:Adding alertmanager.a on vm05 2026-03-09T15:28:30.146 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch apply alertmanager '1;vm05=a' 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[49764]: pgmap v81: 1 pgs: 1 active+clean; 449 KiB data, 46 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[54361]: pgmap v81: 1 pgs: 1 active+clean; 449 KiB data, 46 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:30 vm05 
ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-09T15:28:30.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:30 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:30.639 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:30 vm09 ceph-mon[49358]: pgmap v81: 1 pgs: 1 active+clean; 449 KiB data, 46 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:30.639 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:30 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:30.639 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:30 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:30.639 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:30 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:30.639 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:30 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:30.639 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:30 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:30.639 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:30 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd=[{"prefix": "mgr module enable", "module": "prometheus"}]: dispatch 2026-03-09T15:28:30.639 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:30 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' 2026-03-09T15:28:30.985 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ignoring --setuser ceph since I am not root 2026-03-09T15:28:30.985 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ignoring --setgroup ceph since I am not root 2026-03-09T15:28:30.985 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:30.740+0000 7f1cdc218000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T15:28:30.985 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:30.795+0000 7f1cdc218000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T15:28:31.061 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ignoring --setuser ceph since I am not root 2026-03-09T15:28:31.061 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ignoring --setgroup ceph since I am not root 2026-03-09T15:28:31.061 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:30.737+0000 7ff3bbf39000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T15:28:31.061 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:30.793+0000 7ff3bbf39000 -1 mgr[py] Module balancer 
has missing NOTIFY_TYPES member 2026-03-09T15:28:31.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:31 vm05 ceph-mon[49764]: from='client.24308 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm09=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:31.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:31 vm05 ceph-mon[49764]: Saving service prometheus spec with placement vm09=a;count:1 2026-03-09T15:28:31.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:31 vm05 ceph-mon[49764]: from='client.24314 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "placement": "2;vm05=a;vm09=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:31.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:31 vm05 ceph-mon[49764]: Saving service node-exporter spec with placement vm05=a;vm09=b;count:2 2026-03-09T15:28:31.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:31 vm05 ceph-mon[49764]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-09T15:28:31.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:31 vm05 ceph-mon[49764]: mgrmap e16: y(active, since 99s), standbys: x 2026-03-09T15:28:31.485 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:31 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:31.196+0000 7f1cdc218000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:28:31.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:31 vm05 ceph-mon[54361]: from='client.24308 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm09=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:31.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:31 vm05 ceph-mon[54361]: Saving service prometheus spec with placement vm09=a;count:1 2026-03-09T15:28:31.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:31 vm05 ceph-mon[54361]: from='client.24314 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "placement": "2;vm05=a;vm09=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:31.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:31 vm05 ceph-mon[54361]: Saving service node-exporter spec with placement vm05=a;vm09=b;count:2 2026-03-09T15:28:31.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:31 vm05 ceph-mon[54361]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-09T15:28:31.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:31 vm05 ceph-mon[54361]: mgrmap e16: y(active, since 99s), standbys: x 2026-03-09T15:28:31.561 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:31 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:31.197+0000 7ff3bbf39000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:28:31.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:31 vm09 ceph-mon[49358]: from='client.24308 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "placement": "1;vm09=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:31.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:31 vm09 ceph-mon[49358]: Saving service prometheus spec with placement vm09=a;count:1 2026-03-09T15:28:31.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:31 vm09 
ceph-mon[49358]: from='client.24314 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "placement": "2;vm05=a;vm09=b", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:31.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:31 vm09 ceph-mon[49358]: Saving service node-exporter spec with placement vm05=a;vm09=b;count:2 2026-03-09T15:28:31.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:31 vm09 ceph-mon[49358]: from='mgr.14152 192.168.123.105:0/2802145059' entity='mgr.y' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-03-09T15:28:31.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:31 vm09 ceph-mon[49358]: mgrmap e16: y(active, since 99s), standbys: x 2026-03-09T15:28:31.955 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:31 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:31.579+0000 7ff3bbf39000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T15:28:31.955 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:31 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:31.725+0000 7ff3bbf39000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:28:31.955 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:31 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:31.781+0000 7ff3bbf39000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:28:31.968 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:31 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:31.582+0000 7f1cdc218000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T15:28:31.968 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:31 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:31.732+0000 7f1cdc218000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:28:31.968 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:31 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:31.786+0000 7f1cdc218000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:28:32.235 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:31 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:31.966+0000 7f1cdc218000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:28:32.311 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:31 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:31.955+0000 7ff3bbf39000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:28:32.829 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:32 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:32.561+0000 7ff3bbf39000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:28:32.829 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:32 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:32.767+0000 7ff3bbf39000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:28:32.854 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:32 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:32.579+0000 7f1cdc218000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:28:32.854 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:32 vm05 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:32.789+0000 7f1cdc218000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:28:33.235 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:32 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:32.852+0000 7f1cdc218000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:28:33.235 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:32 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:32.910+0000 7f1cdc218000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:28:33.235 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:32 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:32.976+0000 7f1cdc218000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:28:33.235 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:33.034+0000 7f1cdc218000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:28:33.311 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:32 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:32.829+0000 7ff3bbf39000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:28:33.311 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:32 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:32.885+0000 7ff3bbf39000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:28:33.311 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:32 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:32.949+0000 7ff3bbf39000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:28:33.311 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:33 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:33.008+0000 7ff3bbf39000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:28:33.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:33.349+0000 7f1cdc218000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:28:33.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:33.423+0000 7f1cdc218000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:28:33.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:33 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:33.324+0000 7ff3bbf39000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:28:33.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:33 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:33.399+0000 7ff3bbf39000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:28:34.253 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:33 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:33.989+0000 7ff3bbf39000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:28:34.253 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:34.056+0000 7ff3bbf39000 -1 mgr[py] Module selftest has missing 
NOTIFY_TYPES member 2026-03-09T15:28:34.253 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:34.126+0000 7ff3bbf39000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:28:34.274 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:34 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:34.009+0000 7f1cdc218000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:28:34.274 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:34 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:34.077+0000 7f1cdc218000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T15:28:34.274 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:34 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:34.145+0000 7f1cdc218000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:28:34.509 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:34.253+0000 7ff3bbf39000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T15:28:34.509 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:34.314+0000 7ff3bbf39000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:28:34.509 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:34.418+0000 7ff3bbf39000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:28:34.546 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:34 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:34.273+0000 7f1cdc218000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T15:28:34.546 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:34 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:34.345+0000 7f1cdc218000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:28:34.546 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:34 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:34.451+0000 7f1cdc218000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:28:34.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:34.509+0000 7ff3bbf39000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:28:34.877 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:34 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:34.544+0000 7f1cdc218000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:28:35.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[49764]: Standby manager daemon x restarted 2026-03-09T15:28:35.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[49764]: Standby manager daemon x started 2026-03-09T15:28:35.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/270849628' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:28:35.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[49764]: from='mgr.? 
192.168.123.109:0/270849628' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:28:35.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/270849628' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:28:35.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/270849628' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:28:35.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[49764]: Active manager daemon y restarted 2026-03-09T15:28:35.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[49764]: Activating manager daemon y 2026-03-09T15:28:35.134 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[49764]: osdmap e43: 8 total, 8 up, 8 in 2026-03-09T15:28:35.135 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:34 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:34.875+0000 7f1cdc218000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T15:28:35.135 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:34 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:34.945+0000 7f1cdc218000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:28:35.135 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:35 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: [09/Mar/2026:15:28:35] ENGINE Bus STARTING 2026-03-09T15:28:35.135 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[54361]: Standby manager daemon x restarted 2026-03-09T15:28:35.135 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[54361]: Standby manager daemon x started 2026-03-09T15:28:35.135 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/270849628' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:28:35.135 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/270849628' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:28:35.135 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/270849628' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:28:35.135 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[54361]: from='mgr.? 
192.168.123.109:0/270849628' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:28:35.135 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[54361]: Active manager daemon y restarted 2026-03-09T15:28:35.135 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[54361]: Activating manager daemon y 2026-03-09T15:28:35.135 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:34 vm05 ceph-mon[54361]: osdmap e43: 8 total, 8 up, 8 in 2026-03-09T15:28:35.141 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:34.841+0000 7ff3bbf39000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T15:28:35.141 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:34.905+0000 7ff3bbf39000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:28:35.141 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: [09/Mar/2026:15:28:34] ENGINE Bus STARTING 2026-03-09T15:28:35.141 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: CherryPy Checker: 2026-03-09T15:28:35.141 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: The Application mounted at '' has an empty config. 2026-03-09T15:28:35.141 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: 2026-03-09T15:28:35.141 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: [09/Mar/2026:15:28:35] ENGINE Serving on http://:::9283 2026-03-09T15:28:35.141 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: [09/Mar/2026:15:28:35] ENGINE Bus STARTED 2026-03-09T15:28:35.141 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:34 vm09 ceph-mon[49358]: Standby manager daemon x restarted 2026-03-09T15:28:35.141 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:34 vm09 ceph-mon[49358]: Standby manager daemon x started 2026-03-09T15:28:35.141 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:34 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/270849628' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:28:35.141 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:34 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/270849628' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:28:35.142 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:34 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/270849628' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:28:35.142 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:34 vm09 ceph-mon[49358]: from='mgr.? 
192.168.123.109:0/270849628' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:28:35.142 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:34 vm09 ceph-mon[49358]: Active manager daemon y restarted 2026-03-09T15:28:35.142 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:34 vm09 ceph-mon[49358]: Activating manager daemon y 2026-03-09T15:28:35.142 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:34 vm09 ceph-mon[49358]: osdmap e43: 8 total, 8 up, 8 in 2026-03-09T15:28:35.461 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:35 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: CherryPy Checker: 2026-03-09T15:28:35.461 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:35 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: The Application mounted at '' has an empty config. 2026-03-09T15:28:35.461 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:35 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:28:35.461 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:35 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: [09/Mar/2026:15:28:35] ENGINE Serving on http://:::9283 2026-03-09T15:28:35.461 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:35 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: [09/Mar/2026:15:28:35] ENGINE Bus STARTED 2026-03-09T15:28:35.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:35 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: [09/Mar/2026:15:28:35] ENGINE Bus STARTING 2026-03-09T15:28:35.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:35 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: [09/Mar/2026:15:28:35] ENGINE Serving on https://192.168.123.105:7150 2026-03-09T15:28:35.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:35 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: [09/Mar/2026:15:28:35] ENGINE Bus STARTED 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: mgrmap e17: y(active, starting, since 0.0183893s), standbys: x 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 
192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: Manager daemon y is now available 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' 
entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:28:36.033 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:28:36.034 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: [09/Mar/2026:15:28:35] ENGINE Bus STARTING 2026-03-09T15:28:36.034 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: [09/Mar/2026:15:28:35] ENGINE Serving on https://192.168.123.105:7150 2026-03-09T15:28:36.034 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: [09/Mar/2026:15:28:35] ENGINE Bus STARTED 2026-03-09T15:28:36.034 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:36.034 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:36.034 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:36.034 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled alertmanager update... 2026-03-09T15:28:36.103 DEBUG:teuthology.orchestra.run.vm05:alertmanager.a> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@alertmanager.a.service 2026-03-09T15:28:36.105 INFO:tasks.cephadm:Adding grafana.a on vm09 2026-03-09T15:28:36.105 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph orch apply grafana '1;vm09=a' 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: mgrmap e17: y(active, starting, since 0.0183893s), standbys: x 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", 
"id": 0}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: Manager daemon y is now available 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:28:36.113 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: [09/Mar/2026:15:28:35] ENGINE Bus STARTING 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: [09/Mar/2026:15:28:35] ENGINE Serving on https://192.168.123.105:7150 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: [09/Mar/2026:15:28:35] ENGINE Bus STARTED 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: mgrmap e17: y(active, starting, since 0.0183893s), standbys: x 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:28:36.113 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:28:36.113 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: Manager daemon y is now available 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: 
dispatch 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: [09/Mar/2026:15:28:35] ENGINE Bus STARTING 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: [09/Mar/2026:15:28:35] ENGINE Serving on https://192.168.123.105:7150 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: [09/Mar/2026:15:28:35] ENGINE Bus STARTED 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:36.114 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:36.646 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled grafana update... 2026-03-09T15:28:36.699 DEBUG:teuthology.orchestra.run.vm09:grafana.a> sudo journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@grafana.a.service 2026-03-09T15:28:36.701 INFO:tasks.cephadm:Setting up client nodes... 2026-03-09T15:28:36.701 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: mgrmap e18: y(active, since 1.05198s), standbys: x 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='client.24320 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm05=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: Saving service alertmanager spec with placement vm05=a;count:1 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.222 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: Adjusting osd_memory_target on vm09 to 65804k 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: Unable to set osd_memory_target on vm09 to 67384115: error parsing value: Value '67384115' is below minimum 939524096 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='client.24331 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm09=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: Saving service grafana spec with placement vm09=a;count:1 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 
2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: mgrmap e18: y(active, since 1.05198s), standbys: x 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='client.24320 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm05=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: Saving service alertmanager spec with placement vm05=a;count:1 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.222 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: Adjusting osd_memory_target on vm09 to 65804k 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: Unable to set osd_memory_target on vm09 to 67384115: error parsing value: Value '67384115' is below minimum 939524096 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: Updating vm09:/etc/ceph/ceph.conf 
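The repeated "Unable to set osd_memory_target on vm09 to 67384115: error parsing value: Value '67384115' is below minimum 939524096" entries above come from cephadm's memory autotuning: the mgr shares a fraction of host RAM (autotune_memory_target_ratio, default 0.7 per the module options dumped later in this log) among the OSDs placed on a host, and on these small VPS nodes the per-OSD share falls well below the option's hard minimum, so the mon rejects the config set. A minimal sketch of that arithmetic, with an assumed host memory size and not the actual cephadm autotuner:

    # Illustrative arithmetic behind the "below minimum 939524096" warnings above.
    # The host memory value is an assumption chosen to roughly reproduce the
    # 67384115-byte target seen in the log; this is a sketch, not cephadm code.
    OSD_MEMORY_TARGET_MIN = 939524096     # minimum value the mon will accept (bytes)
    AUTOTUNE_RATIO = 0.7                  # autotune_memory_target_ratio default

    def per_osd_target(host_mem_bytes: int, num_osds: int) -> int:
        # Share a fraction of host RAM among the OSDs on that host.
        return int(host_mem_bytes * AUTOTUNE_RATIO) // max(num_osds, 1)

    target = per_osd_target(host_mem_bytes=385_000_000, num_osds=4)  # ~67 MB each
    assert target < OSD_MEMORY_TARGET_MIN   # -> mon rejects it, warning is logged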
2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='client.24331 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm09=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: Saving service grafana spec with placement vm09=a;count:1 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.223 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:37 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:36 vm09 ceph-mon[49358]: mgrmap e18: y(active, since 1.05198s), standbys: x 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:36 vm09 ceph-mon[49358]: from='client.24320 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "placement": "1;vm05=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:36 vm09 ceph-mon[49358]: Saving service alertmanager spec with placement vm05=a;count:1 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:36 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:36 vm09 ceph-mon[49358]: pgmap v3: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:36 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:36 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:36 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.4", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.311 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:36 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:36 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.5", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.6", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd.7", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: Adjusting osd_memory_target on vm09 to 65804k 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: Unable to set osd_memory_target on vm09 to 67384115: error parsing value: Value '67384115' is below minimum 939524096 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: from='client.24331 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "placement": "1;vm09=a", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: Saving service grafana spec with placement vm09=a;count:1 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 
09 15:28:37 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:37 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:37.346 INFO:teuthology.orchestra.run.vm05.stdout:[client.0] 2026-03-09T15:28:37.346 INFO:teuthology.orchestra.run.vm05.stdout: key = AQAl565pcPT8ExAA2eEOQQ17rTPiJG1EE3nM1Q== 2026-03-09T15:28:37.524 DEBUG:teuthology.orchestra.run.vm05:> set -ex 2026-03-09T15:28:37.524 DEBUG:teuthology.orchestra.run.vm05:> sudo dd of=/etc/ceph/ceph.client.0.keyring 2026-03-09T15:28:37.524 DEBUG:teuthology.orchestra.run.vm05:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring 2026-03-09T15:28:37.574 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-03-09T15:28:37.985 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:37 vm05 bash[68624]: Trying to pull quay.io/prometheus/node-exporter:v1.3.1... 2026-03-09T15:28:38.079 INFO:teuthology.orchestra.run.vm09.stdout:[client.1] 2026-03-09T15:28:38.079 INFO:teuthology.orchestra.run.vm09.stdout: key = AQAm565pv25zBBAATQcyIGOqpH6xEgr0aiTF/Q== 2026-03-09T15:28:38.113 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:38 vm09 ceph-mon[49358]: Deploying daemon node-exporter.a on vm05 2026-03-09T15:28:38.113 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:38 vm09 ceph-mon[49358]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:38.113 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:38 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2348387666' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T15:28:38.113 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:38 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T15:28:38.113 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:38 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-09T15:28:38.113 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:38 vm09 ceph-mon[49358]: mgrmap e19: y(active, since 2s), standbys: x 2026-03-09T15:28:38.146 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-03-09T15:28:38.147 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.client.1.keyring 2026-03-09T15:28:38.147 DEBUG:teuthology.orchestra.run.vm09:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring 2026-03-09T15:28:38.185 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 
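The client setup just above fetches a keyring through "cephadm shell" ("ceph auth get-or-create client.N ..." with full mon/osd/mds/mgr caps) and installs it with "sudo dd of=/etc/ceph/ceph.client.N.keyring" followed by "sudo chmod 0644". A rough Python equivalent of that step, reusing the FSID and image shown in this run and intended only as an illustration of the sequence:

    import subprocess

    # Sketch of the client-keyring step logged above; FSID, image, and paths are
    # copied from this run, and the helper itself is hypothetical.
    FSID = "452f6a00-1bcc-11f1-a1ee-7f1a2af01dea"
    IMAGE = "quay.io/ceph/ceph:v17.2.0"

    def setup_client_keyring(client_id: int) -> None:
        keyring = subprocess.check_output([
            "sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE, "shell",
            "-c", "/etc/ceph/ceph.conf", "-k", "/etc/ceph/ceph.client.admin.keyring",
            "--fsid", FSID, "--",
            "ceph", "auth", "get-or-create", f"client.{client_id}",
            "mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *",
        ])
        path = f"/etc/ceph/ceph.client.{client_id}.keyring"
        # The log does this with `sudo dd of=...` and then `sudo chmod 0644 ...`.
        subprocess.run(["sudo", "dd", f"of={path}"], input=keyring, check=True)
        subprocess.run(["sudo", "chmod", "0644", path], check=True)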
2026-03-09T15:28:38.185 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-03-09T15:28:38.185 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph mgr dump --format=json 2026-03-09T15:28:38.336 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:38.362 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:38 vm05 ceph-mon[49764]: Deploying daemon node-exporter.a on vm05 2026-03-09T15:28:38.362 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:38 vm05 ceph-mon[49764]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:38.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:38 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2348387666' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T15:28:38.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:38 vm05 ceph-mon[49764]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T15:28:38.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:38 vm05 ceph-mon[49764]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-09T15:28:38.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:38 vm05 ceph-mon[49764]: mgrmap e19: y(active, since 2s), standbys: x 2026-03-09T15:28:38.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:38 vm05 ceph-mon[54361]: Deploying daemon node-exporter.a on vm05 2026-03-09T15:28:38.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:38 vm05 ceph-mon[54361]: pgmap v4: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:38.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:38 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2348387666' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T15:28:38.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:38 vm05 ceph-mon[54361]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T15:28:38.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:38 vm05 ceph-mon[54361]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-09T15:28:38.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:38 vm05 ceph-mon[54361]: mgrmap e19: y(active, since 2s), standbys: x 2026-03-09T15:28:38.686 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:28:38.755 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":19,"active_gid":24307,"active_name":"y","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6800","nonce":3348665640},{"type":"v1","addr":"192.168.123.105:6801","nonce":3348665640}]},"active_addr":"192.168.123.105:6801/3348665640","active_change":"2026-03-09T15:28:34.951318+0000","active_mgr_features":4540138303579357183,"available":true,"standbys":[{"gid":24326,"name":"x","mgr_features":4540138303579357183,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP 
server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph 
containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container 
image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per 
host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus","restful"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format 
HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2400","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"7","min":"0","max":"7","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 or 7 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","upmap"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt 
optimization","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. 
Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.23.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/ceph-grafana:8.3.5","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"docker.io/library/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"HAproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"docker.io/arcts/keepalived","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.3.1","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v2.33.4","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"SNMP Gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with 
`--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"docker.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are 
removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_val
ue":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_P
OLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable/disable debug 
options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health 
metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this 
long","long_desc":"","tags":[],"see_also":[]}}},{"name":"diskprediction_local","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predict_interval":{"name":"predict_interval","type":"str","level":"advanced","flags":0,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"predictor_model":{"name":"predictor_model","type":"str","level":"advanced","flags":0,"default_value":"prophetstor","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"str","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. 
Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local pool","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool 
for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tag
s":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"noautoscale":{"name":"noautoscale","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"global autoscale flag","long_desc":"Option to turn on/off the autoscaler for all 
pools","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to 
sleep","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"restful","can_run":true,"error_string":"","module_options":{"enable_auth":{"name":"enable_auth","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"serve
r_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"drive_group_interval":{"name":"drive_group_interval","type":"float","level":"advanced","flags":0,"default_value":"300.0","min":"","max":"","enum_allowed":[],"desc":"interval in seconds between re-application of applied drive_groups","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"",
"enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False
","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name
":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]}}},{"name":"zabbix","can_run":true,"error_string":"","module_options":{"discovery_interval":{"name":"discovery_interval","type":"uint","level":"advanced","flags":0,"default_value":"100","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"identifier":{"name":"identifier","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_host":{"name":"zabbix_host","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_port":{"name":"zabbix_port","type":"int","level":"advanced","flags":0,"default_value":"10051","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"zabbix_sender":{"name":"zabbix_sender","type":"str","level":"advanced","flags":0,"default
_value":"/usr/bin/zabbix_sender","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.105:8443/","prometheus":"http://192.168.123.105:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"last_failure_osd_epoch":43,"active_clients":[{"addrvec":[{"type":"v2","addr":"192.168.123.105:0","nonce":2151097925}]},{"addrvec":[{"type":"v2","addr":"192.168.123.105:0","nonce":3296056962}]},{"addrvec":[{"type":"v2","addr":"192.168.123.105:0","nonce":442745881}]},{"addrvec":[{"type":"v2","addr":"192.168.123.105:0","nonce":3307379082}]}]}} 2026-03-09T15:28:38.756 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-03-09T15:28:38.756 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-03-09T15:28:38.756 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd dump --format=json 2026-03-09T15:28:38.915 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:38.943 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:38 vm05 bash[68624]: Getting image source signatures 2026-03-09T15:28:38.943 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:38 vm05 bash[68624]: Copying blob sha256:b5db1e299295edf3005515ab7879c1df64a33c185d3a7a23aa4dcaa17d26f7b3 2026-03-09T15:28:38.943 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:38 vm05 bash[68624]: Copying blob sha256:aa2a8d90b84cb2a9c422e7005cd166a008ccf22ef5d7d4f07128478585ce35ea 2026-03-09T15:28:38.943 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:38 vm05 bash[68624]: Copying blob sha256:b45d31ee2d7f9f452678a85b0c837c29e12089f31ee8dbac6c8c24dfa4054a30 2026-03-09T15:28:39.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-mon[49764]: from='client.? 192.168.123.109:0/1915483657' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T15:28:39.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-mon[49764]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T15:28:39.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-mon[49764]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-09T15:28:39.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1231038315' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T15:28:39.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:39 vm05 ceph-mon[54361]: from='client.? 
192.168.123.109:0/1915483657' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T15:28:39.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:39 vm05 ceph-mon[54361]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T15:28:39.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:39 vm05 ceph-mon[54361]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-09T15:28:39.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:39 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1231038315' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T15:28:39.305 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:28:39.305 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":43,"fsid":"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea","created":"2026-03-09T15:26:25.355245+0000","modified":"2026-03-09T15:28:34.948949+0000","last_up_change":"2026-03-09T15:28:26.560188+0000","last_in_change":"2026-03-09T15:28:18.200345+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-09T15:27:45.567534+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"17","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"04a1f096-2671-4227-83a4-258146ba498d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,
"up_from":8,"up_thru":40,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6802","nonce":1831508895},{"type":"v1","addr":"192.168.123.105:6803","nonce":1831508895}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6804","nonce":1831508895},{"type":"v1","addr":"192.168.123.105:6805","nonce":1831508895}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6808","nonce":1831508895},{"type":"v1","addr":"192.168.123.105:6809","nonce":1831508895}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6806","nonce":1831508895},{"type":"v1","addr":"192.168.123.105:6807","nonce":1831508895}]},"public_addr":"192.168.123.105:6803/1831508895","cluster_addr":"192.168.123.105:6805/1831508895","heartbeat_back_addr":"192.168.123.105:6809/1831508895","heartbeat_front_addr":"192.168.123.105:6807/1831508895","state":["exists","up"]},{"osd":1,"uuid":"a281303c-8662-4f54-8846-33be08391553","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":11,"up_thru":24,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6810","nonce":3783980181},{"type":"v1","addr":"192.168.123.105:6811","nonce":3783980181}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6812","nonce":3783980181},{"type":"v1","addr":"192.168.123.105:6813","nonce":3783980181}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6816","nonce":3783980181},{"type":"v1","addr":"192.168.123.105:6817","nonce":3783980181}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6814","nonce":3783980181},{"type":"v1","addr":"192.168.123.105:6815","nonce":3783980181}]},"public_addr":"192.168.123.105:6811/3783980181","cluster_addr":"192.168.123.105:6813/3783980181","heartbeat_back_addr":"192.168.123.105:6817/3783980181","heartbeat_front_addr":"192.168.123.105:6815/3783980181","state":["exists","up"]},{"osd":2,"uuid":"3556c187-377e-47cc-8f72-be4edaa111a4","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":14,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6818","nonce":2261056004},{"type":"v1","addr":"192.168.123.105:6819","nonce":2261056004}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6820","nonce":2261056004},{"type":"v1","addr":"192.168.123.105:6821","nonce":2261056004}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6824","nonce":2261056004},{"type":"v1","addr":"192.168.123.105:6825","nonce":2261056004}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6822","nonce":2261056004},{"type":"v1","addr":"192.168.123.105:6823","nonce":2261056004}]},"public_addr":"192.168.123.105:6819/2261056004","cluster_addr":"192.168.123.105:6821/2261056004","heartbeat_back_addr":"192.168.123.105:6825/2261056004","heartbeat_front_addr":"192.168.123.105:6823/2261056004","state":["exists","up"]},{"osd":3,"uuid":"fe8044ac-8ad4-4057-a7f5-e8e77db769c3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6826","nonce":2164219141},{"type":"v1","addr":"192.168.123.105:6827","nonce":2164219141}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6828","nonce":2164219141},{"type":"v1","addr":"192.168.123.105:6829","nonce":2164219141}]},"hear
tbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6832","nonce":2164219141},{"type":"v1","addr":"192.168.123.105:6833","nonce":2164219141}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6830","nonce":2164219141},{"type":"v1","addr":"192.168.123.105:6831","nonce":2164219141}]},"public_addr":"192.168.123.105:6827/2164219141","cluster_addr":"192.168.123.105:6829/2164219141","heartbeat_back_addr":"192.168.123.105:6833/2164219141","heartbeat_front_addr":"192.168.123.105:6831/2164219141","state":["exists","up"]},{"osd":4,"uuid":"c4e71d46-f1e6-4cdb-b025-41a19b086c9f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6800","nonce":364460566},{"type":"v1","addr":"192.168.123.109:6801","nonce":364460566}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6802","nonce":364460566},{"type":"v1","addr":"192.168.123.109:6803","nonce":364460566}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6806","nonce":364460566},{"type":"v1","addr":"192.168.123.109:6807","nonce":364460566}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6804","nonce":364460566},{"type":"v1","addr":"192.168.123.109:6805","nonce":364460566}]},"public_addr":"192.168.123.109:6801/364460566","cluster_addr":"192.168.123.109:6803/364460566","heartbeat_back_addr":"192.168.123.109:6807/364460566","heartbeat_front_addr":"192.168.123.109:6805/364460566","state":["exists","up"]},{"osd":5,"uuid":"b092f491-9b09-4c77-81aa-03a6adc5b415","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":29,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6808","nonce":1669811496},{"type":"v1","addr":"192.168.123.109:6809","nonce":1669811496}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6810","nonce":1669811496},{"type":"v1","addr":"192.168.123.109:6811","nonce":1669811496}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6814","nonce":1669811496},{"type":"v1","addr":"192.168.123.109:6815","nonce":1669811496}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6812","nonce":1669811496},{"type":"v1","addr":"192.168.123.109:6813","nonce":1669811496}]},"public_addr":"192.168.123.109:6809/1669811496","cluster_addr":"192.168.123.109:6811/1669811496","heartbeat_back_addr":"192.168.123.109:6815/1669811496","heartbeat_front_addr":"192.168.123.109:6813/1669811496","state":["exists","up"]},{"osd":6,"uuid":"8af33735-b39b-4b53-952c-68e544ffc047","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":35,"up_thru":36,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6816","nonce":4136949191},{"type":"v1","addr":"192.168.123.109:6817","nonce":4136949191}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6818","nonce":4136949191},{"type":"v1","addr":"192.168.123.109:6819","nonce":4136949191}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6822","nonce":4136949191},{"type":"v1","addr":"192.168.123.109:6823","nonce":4136949191}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6820","nonce":4136949191},{"type":"v1","addr":"192.168.123.109:6821","nonce":4136949191}]},"public_addr":"192.168.123.109:6817/4136949191","clust
er_addr":"192.168.123.109:6819/4136949191","heartbeat_back_addr":"192.168.123.109:6823/4136949191","heartbeat_front_addr":"192.168.123.109:6821/4136949191","state":["exists","up"]},{"osd":7,"uuid":"74ebc967-e985-4f5a-a0f8-c8493e041bc8","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":40,"up_thru":41,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6824","nonce":3774114446},{"type":"v1","addr":"192.168.123.109:6825","nonce":3774114446}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6826","nonce":3774114446},{"type":"v1","addr":"192.168.123.109:6827","nonce":3774114446}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6830","nonce":3774114446},{"type":"v1","addr":"192.168.123.109:6831","nonce":3774114446}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6828","nonce":3774114446},{"type":"v1","addr":"192.168.123.109:6829","nonce":3774114446}]},"public_addr":"192.168.123.109:6825/3774114446","cluster_addr":"192.168.123.109:6827/3774114446","heartbeat_back_addr":"192.168.123.109:6831/3774114446","heartbeat_front_addr":"192.168.123.109:6829/3774114446","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:27:26.926469+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:27:35.454112+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:27:43.615576+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:27:52.546989+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:28:00.315498+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:28:08.431556+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:28:16.976500+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:28:25.450762+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.105:0/1724629666":"2026-03-10T15:28:34.948916+0000","192.168.123.105:6801/3424409228":"2026-03-10T15:28:34.948916+0000","192.168.123.105:0/2003379400":"2026-03-10T15:28:34.948916+0000","192.168.123.105:0/3945943148":"2026-03-10T15:28:34.948916+0000","192.168.123.105:0/954087458":"2026-03-10T15:28:34.948916+0000","192.168.123.105:6801/610584152":"2026-03-10T15:26:51.284350+0000","192.168.123.105:0/1659984683":"2026-03-10T15:26:51.284350+0000","192.168.123.105:0/3264305837":"2026-03-10T15:26:40.537100+0000","192.168.123.105:0/649368777":"2026-03-10T15:26:51.284350+0000","192.168.123.105:0/1527221936":"2026-03-10T15:26:40.537100+00
00","192.168.123.105:6800/3424409228":"2026-03-10T15:28:34.948916+0000","192.168.123.105:0/2276953556":"2026-03-10T15:26:40.537100+0000","192.168.123.105:0/814043816":"2026-03-10T15:26:51.284350+0000","192.168.123.105:6800/610584152":"2026-03-10T15:26:51.284350+0000","192.168.123.105:6800/1499216893":"2026-03-10T15:26:40.537100+0000","192.168.123.105:6801/1499216893":"2026-03-10T15:26:40.537100+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-09T15:28:39.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:39 vm09 ceph-mon[49358]: from='client.? 192.168.123.109:0/1915483657' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T15:28:39.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:39 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' cmd=[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]: dispatch 2026-03-09T15:28:39.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:39 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-03-09T15:28:39.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:39 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1231038315' entity='client.admin' cmd=[{"prefix": "mgr dump", "format": "json"}]: dispatch 2026-03-09T15:28:39.369 INFO:tasks.cephadm.ceph_manager.ceph:all up! 
2026-03-09T15:28:39.369 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd dump --format=json 2026-03-09T15:28:39.591 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:39.987 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 bash[68624]: Copying config sha256:1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 2026-03-09T15:28:39.987 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 bash[68624]: Writing manifest to image destination 2026-03-09T15:28:39.987 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 podman[68624]: 2026-03-09 15:28:39.676589864 +0000 UTC m=+2.080674064 container create 9e0bcae9a93c5cdd9b8309a9a3f2f027509f56e92e5f40e3ec1f07ee74d19a8f (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 podman[68624]: 2026-03-09 15:28:39.708275359 +0000 UTC m=+2.112359560 container init 9e0bcae9a93c5cdd9b8309a9a3f2f027509f56e92e5f40e3ec1f07ee74d19a8f (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 podman[68624]: 2026-03-09 15:28:39.711580141 +0000 UTC m=+2.115664341 container start 9e0bcae9a93c5cdd9b8309a9a3f2f027509f56e92e5f40e3ec1f07ee74d19a8f (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 bash[68624]: 9e0bcae9a93c5cdd9b8309a9a3f2f027509f56e92e5f40e3ec1f07ee74d19a8f 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 podman[68624]: 2026-03-09 15:28:39.669954059 +0000 UTC m=+2.074038270 image pull 1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 quay.io/prometheus/node-exporter:v1.3.1 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.721Z caller=node_exporter.go:182 level=info msg="Starting node_exporter" version="(version=1.3.1, branch=HEAD, revision=a2321e7b940ddcff26873612bccdf7cd4c42b6b6)" 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.721Z caller=node_exporter.go:183 level=info msg="Build context" build_context="(go=go1.17.3, user=root@243aafa5525c, date=20211205-11:09:49)" 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 systemd[1]: Started Ceph node-exporter.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
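node-exporter.a has just been pulled, created, and started by podman on vm05; a few entries further down it logs "Listening on" address=:9100 with TLS disabled. A rough way to confirm the exporter is actually serving is to poll its metrics endpoint until it answers. A sketch, assuming node_exporter's default port 9100 and plain HTTP; the hostname in the usage line is taken from the targets list and is only illustrative:

    import time
    import urllib.request

    def wait_for_node_exporter(host, port=9100, timeout=60):
        # Poll http://<host>:<port>/metrics until node_exporter answers with 200.
        url = "http://{}:{}/metrics".format(host, port)
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                with urllib.request.urlopen(url, timeout=5) as resp:
                    if resp.status == 200:
                        return True
            except OSError:
                pass
            time.sleep(2)
        return False

    # e.g. wait_for_node_exporter("vm05.local")
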
2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/) 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:108 level=info msg="Enabled collectors" 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=arp 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=bcache 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=bonding 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=btrfs 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=conntrack 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=cpu 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=cpufreq 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=diskstats 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=dmi 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info 
collector=edac 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=entropy 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=fibrechannel 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=filefd 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=filesystem 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=hwmon 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=infiniband 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=ipvs 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=loadavg 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=mdadm 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=meminfo 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=netclass 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=netdev 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=netstat 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=nfs 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=nfsd 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=nvme 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=os 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=powersupplyclass 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=pressure 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=rapl 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=schedstat 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=sockstat 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=softnet 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=stat 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=tapestats 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=textfile 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=thermal_zone 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=time 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 
level=info collector=udp_queues 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=uname 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=vmstat 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=xfs 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:115 level=info collector=zfs 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=node_exporter.go:199 level=info msg="Listening on" address=:9100 2026-03-09T15:28:39.988 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:28:39 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[68983]: ts=2026-03-09T15:28:39.724Z caller=tls_config.go:195 level=info msg="TLS is disabled." http2=false 2026-03-09T15:28:40.006 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:28:40.007 INFO:teuthology.orchestra.run.vm05.stdout:{"epoch":43,"fsid":"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea","created":"2026-03-09T15:26:25.355245+0000","modified":"2026-03-09T15:28:34.948949+0000","last_up_change":"2026-03-09T15:28:26.560188+0000","last_in_change":"2026-03-09T15:28:18.200345+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":18,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"quincy","pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-03-09T15:27:45.567534+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"17","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":
{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}}}],"osds":[{"osd":0,"uuid":"04a1f096-2671-4227-83a4-258146ba498d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":8,"up_thru":40,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6802","nonce":1831508895},{"type":"v1","addr":"192.168.123.105:6803","nonce":1831508895}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6804","nonce":1831508895},{"type":"v1","addr":"192.168.123.105:6805","nonce":1831508895}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6808","nonce":1831508895},{"type":"v1","addr":"192.168.123.105:6809","nonce":1831508895}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6806","nonce":1831508895},{"type":"v1","addr":"192.168.123.105:6807","nonce":1831508895}]},"public_addr":"192.168.123.105:6803/1831508895","cluster_addr":"192.168.123.105:6805/1831508895","heartbeat_back_addr":"192.168.123.105:6809/1831508895","heartbeat_front_addr":"192.168.123.105:6807/1831508895","state":["exists","up"]},{"osd":1,"uuid":"a281303c-8662-4f54-8846-33be08391553","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":11,"up_thru":24,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6810","nonce":3783980181},{"type":"v1","addr":"192.168.123.105:6811","nonce":3783980181}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6812","nonce":3783980181},{"type":"v1","addr":"192.168.123.105:6813","nonce":3783980181}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6816","nonce":3783980181},{"type":"v1","addr":"192.168.123.105:6817","nonce":3783980181}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6814","nonce":3783980181},{"type":"v1","addr":"192.168.123.105:6815","nonce":3783980181}]},"public_addr":"192.168.123.105:6811/3783980181","cluster_addr":"192.168.123.105:6813/3783980181","heartbeat_back_addr":"192.168.123.105:6817/3783980181","heartbeat_front_addr":"192.168.123.105:6815/3783980181","state":["exists","up"]},{"osd":2,"uuid":"3556c187-377e-47cc-8f72-be4edaa111a4","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":14,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6818","nonce":2261056004},{"type":"v1","addr":"192.168.123.105:6819","nonce":2261056004}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6820","nonce":2261056004},{"type":"v1","addr":"192.168.123.105:6821","nonce":2261056004}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6824","nonce":2261056004},{"type":"v1","addr":"192.168.123.105:6825","nonce":2261056004}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6822","nonce":2261056004},{"type":"v1","addr":"192.168.123.105:6823","nonce":2261056004}]},"public_addr":"192.168.123.105:6819/2261056004","cluster_addr":"192.168.123.105:6821/2261056004","heartbeat_back_addr":"192.168.123.105:6825/2261056004","heartbeat_front_addr":"192.168.123.105:6823/2261056004","state":["exists","up"]},{"os
d":3,"uuid":"fe8044ac-8ad4-4057-a7f5-e8e77db769c3","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6826","nonce":2164219141},{"type":"v1","addr":"192.168.123.105:6827","nonce":2164219141}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6828","nonce":2164219141},{"type":"v1","addr":"192.168.123.105:6829","nonce":2164219141}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6832","nonce":2164219141},{"type":"v1","addr":"192.168.123.105:6833","nonce":2164219141}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.105:6830","nonce":2164219141},{"type":"v1","addr":"192.168.123.105:6831","nonce":2164219141}]},"public_addr":"192.168.123.105:6827/2164219141","cluster_addr":"192.168.123.105:6829/2164219141","heartbeat_back_addr":"192.168.123.105:6833/2164219141","heartbeat_front_addr":"192.168.123.105:6831/2164219141","state":["exists","up"]},{"osd":4,"uuid":"c4e71d46-f1e6-4cdb-b025-41a19b086c9f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":23,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6800","nonce":364460566},{"type":"v1","addr":"192.168.123.109:6801","nonce":364460566}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6802","nonce":364460566},{"type":"v1","addr":"192.168.123.109:6803","nonce":364460566}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6806","nonce":364460566},{"type":"v1","addr":"192.168.123.109:6807","nonce":364460566}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6804","nonce":364460566},{"type":"v1","addr":"192.168.123.109:6805","nonce":364460566}]},"public_addr":"192.168.123.109:6801/364460566","cluster_addr":"192.168.123.109:6803/364460566","heartbeat_back_addr":"192.168.123.109:6807/364460566","heartbeat_front_addr":"192.168.123.109:6805/364460566","state":["exists","up"]},{"osd":5,"uuid":"b092f491-9b09-4c77-81aa-03a6adc5b415","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":28,"up_thru":29,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6808","nonce":1669811496},{"type":"v1","addr":"192.168.123.109:6809","nonce":1669811496}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6810","nonce":1669811496},{"type":"v1","addr":"192.168.123.109:6811","nonce":1669811496}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6814","nonce":1669811496},{"type":"v1","addr":"192.168.123.109:6815","nonce":1669811496}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6812","nonce":1669811496},{"type":"v1","addr":"192.168.123.109:6813","nonce":1669811496}]},"public_addr":"192.168.123.109:6809/1669811496","cluster_addr":"192.168.123.109:6811/1669811496","heartbeat_back_addr":"192.168.123.109:6815/1669811496","heartbeat_front_addr":"192.168.123.109:6813/1669811496","state":["exists","up"]},{"osd":6,"uuid":"8af33735-b39b-4b53-952c-68e544ffc047","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":35,"up_thru":36,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6816","nonce":4136949191},{"type":"v1","addr":"192.168.123.109:6817","nonce":4136949191}]},"cluster_addrs":{"addrvec":[{"type"
:"v2","addr":"192.168.123.109:6818","nonce":4136949191},{"type":"v1","addr":"192.168.123.109:6819","nonce":4136949191}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6822","nonce":4136949191},{"type":"v1","addr":"192.168.123.109:6823","nonce":4136949191}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6820","nonce":4136949191},{"type":"v1","addr":"192.168.123.109:6821","nonce":4136949191}]},"public_addr":"192.168.123.109:6817/4136949191","cluster_addr":"192.168.123.109:6819/4136949191","heartbeat_back_addr":"192.168.123.109:6823/4136949191","heartbeat_front_addr":"192.168.123.109:6821/4136949191","state":["exists","up"]},{"osd":7,"uuid":"74ebc967-e985-4f5a-a0f8-c8493e041bc8","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":40,"up_thru":41,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6824","nonce":3774114446},{"type":"v1","addr":"192.168.123.109:6825","nonce":3774114446}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6826","nonce":3774114446},{"type":"v1","addr":"192.168.123.109:6827","nonce":3774114446}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6830","nonce":3774114446},{"type":"v1","addr":"192.168.123.109:6831","nonce":3774114446}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6828","nonce":3774114446},{"type":"v1","addr":"192.168.123.109:6829","nonce":3774114446}]},"public_addr":"192.168.123.109:6825/3774114446","cluster_addr":"192.168.123.109:6827/3774114446","heartbeat_back_addr":"192.168.123.109:6831/3774114446","heartbeat_front_addr":"192.168.123.109:6829/3774114446","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:27:26.926469+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:27:35.454112+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:27:43.615576+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:27:52.546989+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:28:00.315498+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:28:08.431556+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:28:16.976500+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4540138303579357183,"old_weight":0,"last_purged_snaps_scrub":"2026-03-09T15:28:25.450762+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.105:0/1724629666":"2026-03-10T15:28:34.948916+0000","192.168.123.105:6801/3424409228":"2026-03-10T15:28:34.948916+0000","192.168.123.1
05:0/2003379400":"2026-03-10T15:28:34.948916+0000","192.168.123.105:0/3945943148":"2026-03-10T15:28:34.948916+0000","192.168.123.105:0/954087458":"2026-03-10T15:28:34.948916+0000","192.168.123.105:6801/610584152":"2026-03-10T15:26:51.284350+0000","192.168.123.105:0/1659984683":"2026-03-10T15:26:51.284350+0000","192.168.123.105:0/3264305837":"2026-03-10T15:26:40.537100+0000","192.168.123.105:0/649368777":"2026-03-10T15:26:51.284350+0000","192.168.123.105:0/1527221936":"2026-03-10T15:26:40.537100+0000","192.168.123.105:6800/3424409228":"2026-03-10T15:28:34.948916+0000","192.168.123.105:0/2276953556":"2026-03-10T15:26:40.537100+0000","192.168.123.105:0/814043816":"2026-03-10T15:26:51.284350+0000","192.168.123.105:6800/610584152":"2026-03-10T15:26:51.284350+0000","192.168.123.105:6800/1499216893":"2026-03-10T15:26:40.537100+0000","192.168.123.105:6801/1499216893":"2026-03-10T15:26:40.537100+0000"},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"jerasure","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-03-09T15:28:40.080 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph tell osd.0 flush_pg_stats 2026-03-09T15:28:40.080 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph tell osd.1 flush_pg_stats 2026-03-09T15:28:40.080 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph tell osd.2 flush_pg_stats 2026-03-09T15:28:40.080 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph tell osd.3 flush_pg_stats 2026-03-09T15:28:40.080 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph tell osd.4 flush_pg_stats 2026-03-09T15:28:40.080 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph tell osd.5 flush_pg_stats 2026-03-09T15:28:40.080 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph tell osd.6 flush_pg_stats 2026-03-09T15:28:40.080 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph tell osd.7 flush_pg_stats 2026-03-09T15:28:40.365 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:40 vm05 ceph-mon[49764]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:40.365 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:40 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/573082288' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T15:28:40.365 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:40 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:40.365 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:40 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/55653358' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T15:28:40.366 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:40 vm05 ceph-mon[54361]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:40.366 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:40 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/573082288' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T15:28:40.366 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:40 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:40.366 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:40 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/55653358' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T15:28:40.423 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:40 vm09 systemd[1]: Starting Ceph node-exporter.b for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:28:40.423 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:40 vm09 ceph-mon[49358]: pgmap v5: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:40.423 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:40 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/573082288' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T15:28:40.423 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:40 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:40.423 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:40 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/55653358' entity='client.admin' cmd=[{"prefix": "osd dump", "format": "json"}]: dispatch 2026-03-09T15:28:40.711 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:40.723 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:40.734 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:40.754 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:40.811 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:40 vm09 bash[63577]: Trying to pull quay.io/prometheus/node-exporter:v1.3.1... 
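The "osd dump" JSON dumped above is what the harness inspects to confirm that every OSD is registered, up and in before it proceeds. As a minimal sketch (not teuthology's own code; the `cephadm_json` helper name is invented here, while the fsid, image and cephadm path are copied from this run), the same check could be expressed as:

```python
# Sketch: parse "ceph osd dump --format=json" from inside a cephadm shell and
# verify that every OSD in the map reports up=1 and in=1.
import json
import subprocess

FSID = "452f6a00-1bcc-11f1-a1ee-7f1a2af01dea"
IMAGE = "quay.io/ceph/ceph:v17.2.0"

def cephadm_json(*ceph_args):
    """Run a ceph command inside 'cephadm shell' and decode its JSON output."""
    cmd = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
           "shell", "--fsid", FSID, "--", "ceph", *ceph_args, "--format=json"]
    return json.loads(subprocess.check_output(cmd))

osd_dump = cephadm_json("osd", "dump")
bad = [o["osd"] for o in osd_dump["osds"] if not (o["up"] and o["in"])]
print("all OSDs up+in" if not bad else f"OSDs not up+in: {bad}")
```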
2026-03-09T15:28:40.837 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:40.852 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:40.856 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:41.260 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:41.370 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:41 vm05 ceph-mon[49764]: Deploying daemon node-exporter.b on vm09 2026-03-09T15:28:41.371 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:41 vm05 ceph-mon[54361]: Deploying daemon node-exporter.b on vm09 2026-03-09T15:28:41.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:41 vm09 ceph-mon[49358]: Deploying daemon node-exporter.b on vm09 2026-03-09T15:28:42.031 INFO:teuthology.orchestra.run.vm05.stdout:85899345931 2026-03-09T15:28:42.031 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd last-stat-seq osd.3 2026-03-09T15:28:42.051 INFO:teuthology.orchestra.run.vm05.stdout:60129542157 2026-03-09T15:28:42.051 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd last-stat-seq osd.2 2026-03-09T15:28:42.060 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:41 vm09 bash[63577]: Getting image source signatures 2026-03-09T15:28:42.061 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:41 vm09 bash[63577]: Copying blob sha256:b5db1e299295edf3005515ab7879c1df64a33c185d3a7a23aa4dcaa17d26f7b3 2026-03-09T15:28:42.061 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:41 vm09 bash[63577]: Copying blob sha256:aa2a8d90b84cb2a9c422e7005cd166a008ccf22ef5d7d4f07128478585ce35ea 2026-03-09T15:28:42.061 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:41 vm09 bash[63577]: Copying blob sha256:b45d31ee2d7f9f452678a85b0c837c29e12089f31ee8dbac6c8c24dfa4054a30 2026-03-09T15:28:42.082 INFO:teuthology.orchestra.run.vm05.stdout:34359738384 2026-03-09T15:28:42.083 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd last-stat-seq osd.0 2026-03-09T15:28:42.279 INFO:teuthology.orchestra.run.vm05.stdout:98784247817 2026-03-09T15:28:42.279 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd last-stat-seq osd.4 2026-03-09T15:28:42.291 INFO:teuthology.orchestra.run.vm05.stdout:120259084296 2026-03-09T15:28:42.292 INFO:teuthology.orchestra.run.vm05.stdout:150323855366 2026-03-09T15:28:42.292 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd last-stat-seq osd.5 2026-03-09T15:28:42.292 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd last-stat-seq osd.6 2026-03-09T15:28:42.476 INFO:teuthology.orchestra.run.vm05.stdout:47244640270 
2026-03-09T15:28:42.477 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd last-stat-seq osd.1 2026-03-09T15:28:42.497 INFO:teuthology.orchestra.run.vm05.stdout:171798691844 2026-03-09T15:28:42.497 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd last-stat-seq osd.7 2026-03-09T15:28:42.573 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-mon[49358]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:42.573 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 bash[63577]: Copying config sha256:1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 bash[63577]: Writing manifest to image destination 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 podman[63577]: 2026-03-09 15:28:42.327828732 +0000 UTC m=+1.916148040 image pull 1dbe0e931976487e20e5cfb272087e08a9779c88fd5e9617ed7042dd9751ec26 quay.io/prometheus/node-exporter:v1.3.1 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 podman[63577]: 2026-03-09 15:28:42.334261304 +0000 UTC m=+1.922580612 container create b80da7ef91675d378780d3d39f6352eaa1338048014243676b24b849ac270863 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 podman[63577]: 2026-03-09 15:28:42.368591588 +0000 UTC m=+1.956910896 container init b80da7ef91675d378780d3d39f6352eaa1338048014243676b24b849ac270863 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 podman[63577]: 2026-03-09 15:28:42.37417398 +0000 UTC m=+1.962493288 container start b80da7ef91675d378780d3d39f6352eaa1338048014243676b24b849ac270863 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 bash[63577]: b80da7ef91675d378780d3d39f6352eaa1338048014243676b24b849ac270863 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 systemd[1]: Started Ceph node-exporter.b for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
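The lines above show the flush/verify cycle the task runs against every OSD: `ceph tell osd.N flush_pg_stats` returns a stat sequence number, and the harness then polls `ceph osd last-stat-seq osd.N` until the monitors report a sequence at least that new (hence the later "need seq ... got ... for osd.N" messages and the retry for osd.2). A minimal sketch of that loop, assuming the same cephadm shell invocation as in the log (the `ceph` and `flush_pg_stats` helper names and the timeout are illustrative, not teuthology's implementation):

```python
# Sketch: flush PG stats on each OSD, then wait until the cluster has absorbed
# stats at least as new as the sequence number each flush returned.
import subprocess
import time

FSID = "452f6a00-1bcc-11f1-a1ee-7f1a2af01dea"
IMAGE = "quay.io/ceph/ceph:v17.2.0"

def ceph(*args):
    cmd = ["sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
           "shell", "--fsid", FSID, "--", "ceph", *args]
    return subprocess.check_output(cmd).decode().strip()

def flush_pg_stats(osd_ids, timeout=300):
    # Each flush_pg_stats call prints the sequence number we need to reach.
    need = {i: int(ceph("tell", f"osd.{i}", "flush_pg_stats")) for i in osd_ids}
    deadline = time.time() + timeout
    for i, seq in need.items():
        while int(ceph("osd", "last-stat-seq", f"osd.{i}")) < seq:
            if time.time() > deadline:
                raise RuntimeError(f"osd.{i} stats not flushed within {timeout}s")
            time.sleep(1)  # stats not yet reported to the mgr/mon; retry

flush_pg_stats(range(8))
```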
2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.382Z caller=node_exporter.go:182 level=info msg="Starting node_exporter" version="(version=1.3.1, branch=HEAD, revision=a2321e7b940ddcff26873612bccdf7cd4c42b6b6)" 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.382Z caller=node_exporter.go:183 level=info msg="Build context" build_context="(go=go1.17.3, user=root@243aafa5525c, date=20211205-11:09:49)" 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+)($|/) 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:108 level=info msg="Enabled collectors" 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=arp 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=bcache 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=bonding 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=btrfs 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=conntrack 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=cpu 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=cpufreq 2026-03-09T15:28:42.574 
INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=diskstats 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=dmi 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=edac 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=entropy 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=fibrechannel 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=filefd 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=filesystem 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=hwmon 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=infiniband 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=ipvs 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=loadavg 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=mdadm 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=meminfo 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=netclass 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=netdev 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=netstat 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=nfs 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=nfsd 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=nvme 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=os 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=powersupplyclass 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=pressure 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=rapl 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=schedstat 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=sockstat 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=softnet 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=stat 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=tapestats 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 
level=info collector=textfile 2026-03-09T15:28:42.574 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=thermal_zone 2026-03-09T15:28:42.575 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=time 2026-03-09T15:28:42.575 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=udp_queues 2026-03-09T15:28:42.575 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=uname 2026-03-09T15:28:42.575 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=vmstat 2026-03-09T15:28:42.575 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=xfs 2026-03-09T15:28:42.575 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:115 level=info collector=zfs 2026-03-09T15:28:42.575 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=node_exporter.go:199 level=info msg="Listening on" address=:9100 2026-03-09T15:28:42.575 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:28:42 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[63631]: ts=2026-03-09T15:28:42.383Z caller=tls_config.go:195 level=info msg="TLS is disabled." 
http2=false 2026-03-09T15:28:42.601 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:42.643 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:42 vm05 ceph-mon[49764]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:42.643 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:42 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:42.643 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:42 vm05 ceph-mon[54361]: pgmap v6: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:42.643 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:42 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:42.840 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:43.058 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:43.228 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:43.235 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:43.416 INFO:teuthology.orchestra.run.vm05.stdout:60129542155 2026-03-09T15:28:43.433 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:43.619 INFO:tasks.cephadm.ceph_manager.ceph:need seq 60129542157 got 60129542155 for osd.2 2026-03-09T15:28:43.697 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:43.722 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:43 vm05 ceph-mon[49764]: Deploying daemon prometheus.a on vm09 2026-03-09T15:28:43.722 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:43 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/863849729' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-09T15:28:43.722 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:43 vm05 ceph-mon[54361]: Deploying daemon prometheus.a on vm09 2026-03-09T15:28:43.722 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:43 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/863849729' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-09T15:28:43.725 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:44.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:43 vm09 ceph-mon[49358]: Deploying daemon prometheus.a on vm09 2026-03-09T15:28:44.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:43 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/863849729' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-09T15:28:44.104 INFO:teuthology.orchestra.run.vm05.stdout:85899345931 2026-03-09T15:28:44.295 INFO:tasks.cephadm.ceph_manager.ceph:need seq 85899345931 got 85899345931 for osd.3 2026-03-09T15:28:44.295 DEBUG:teuthology.parallel:result is None 2026-03-09T15:28:44.547 INFO:teuthology.orchestra.run.vm05.stdout:98784247817 2026-03-09T15:28:44.609 INFO:teuthology.orchestra.run.vm05.stdout:34359738384 2026-03-09T15:28:44.620 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph osd last-stat-seq osd.2 2026-03-09T15:28:44.796 INFO:tasks.cephadm.ceph_manager.ceph:need seq 98784247817 got 98784247817 for osd.4 2026-03-09T15:28:44.796 DEBUG:teuthology.parallel:result is None 2026-03-09T15:28:44.799 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:44 vm05 ceph-mon[49764]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:44.799 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:44 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3983607808' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-09T15:28:44.799 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:44 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1927210132' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-09T15:28:44.799 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:44 vm05 ceph-mon[54361]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:44.799 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:44 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3983607808' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-09T15:28:44.799 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:44 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1927210132' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-09T15:28:44.869 INFO:tasks.cephadm.ceph_manager.ceph:need seq 34359738384 got 34359738384 for osd.0 2026-03-09T15:28:44.869 DEBUG:teuthology.parallel:result is None 2026-03-09T15:28:44.885 INFO:teuthology.orchestra.run.vm05.stdout:120259084296 2026-03-09T15:28:44.917 INFO:teuthology.orchestra.run.vm05.stdout:47244640270 2026-03-09T15:28:44.931 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:44.942 INFO:teuthology.orchestra.run.vm05.stdout:150323855366 2026-03-09T15:28:44.974 INFO:teuthology.orchestra.run.vm05.stdout:171798691844 2026-03-09T15:28:45.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:44 vm09 ceph-mon[49358]: pgmap v7: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:45.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:44 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3983607808' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 3}]: dispatch 2026-03-09T15:28:45.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:44 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/1927210132' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 4}]: dispatch 2026-03-09T15:28:45.133 INFO:tasks.cephadm.ceph_manager.ceph:need seq 120259084296 got 120259084296 for osd.5 2026-03-09T15:28:45.133 DEBUG:teuthology.parallel:result is None 2026-03-09T15:28:45.164 INFO:tasks.cephadm.ceph_manager.ceph:need seq 150323855366 got 150323855366 for osd.6 2026-03-09T15:28:45.164 DEBUG:teuthology.parallel:result is None 2026-03-09T15:28:45.165 INFO:tasks.cephadm.ceph_manager.ceph:need seq 171798691844 got 171798691844 for osd.7 2026-03-09T15:28:45.165 DEBUG:teuthology.parallel:result is None 2026-03-09T15:28:45.184 INFO:tasks.cephadm.ceph_manager.ceph:need seq 47244640270 got 47244640270 for osd.1 2026-03-09T15:28:45.184 DEBUG:teuthology.parallel:result is None 2026-03-09T15:28:45.446 INFO:teuthology.orchestra.run.vm05.stdout:60129542157 2026-03-09T15:28:45.514 INFO:tasks.cephadm.ceph_manager.ceph:need seq 60129542157 got 60129542157 for osd.2 2026-03-09T15:28:45.514 DEBUG:teuthology.parallel:result is None 2026-03-09T15:28:45.515 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean 2026-03-09T15:28:45.515 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph pg dump --format=json 2026-03-09T15:28:45.592 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1559451141' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-09T15:28:45.679 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:45.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3437570511' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-09T15:28:45.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3202152865' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-09T15:28:45.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2456237271' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-09T15:28:45.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3862076300' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-09T15:28:45.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:45.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2600620583' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-09T15:28:45.848 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1559451141' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-09T15:28:45.848 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3437570511' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-09T15:28:45.848 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/3202152865' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-09T15:28:45.848 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2456237271' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-09T15:28:45.848 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3862076300' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-09T15:28:45.848 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:45.848 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:45 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2600620583' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-09T15:28:46.025 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:28:46.028 INFO:teuthology.orchestra.run.vm05.stderr:dumped all 2026-03-09T15:28:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:45 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1559451141' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 0}]: dispatch 2026-03-09T15:28:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:45 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3437570511' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 5}]: dispatch 2026-03-09T15:28:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:45 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3202152865' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 1}]: dispatch 2026-03-09T15:28:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:45 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2456237271' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 6}]: dispatch 2026-03-09T15:28:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:45 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3862076300' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 7}]: dispatch 2026-03-09T15:28:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:45 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:45 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/2600620583' entity='client.admin' cmd=[{"prefix": "osd last-stat-seq", "id": 2}]: dispatch 2026-03-09T15:28:46.072 INFO:teuthology.orchestra.run.vm05.stdout:{"pg_ready":true,"pg_map":{"version":8,"stamp":"2026-03-09T15:28:44.975890+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":48648,"kb_used_data":4488,"kb_used_omap":0,"kb_used_meta":44096,"kb_avail":167690744,"statfs":{"total":171765137408,"available":171715321856,"internally_reserved":0,"allocated":4595712,"data_stored":2597203,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45154304},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.947897"},"pg_stats":[{"pgid":"1.0","version":"43'87","reported_seq":56,"reported_epoch":43,"state"
:"active+clean","last_fresh":"2026-03-09T15:28:35.068852+0000","last_change":"2026-03-09T15:28:28.890681+0000","last_active":"2026-03-09T15:28:35.068852+0000","last_peered":"2026-03-09T15:28:35.068852+0000","last_clean":"2026-03-09T15:28:35.068852+0000","last_became_active":"2026-03-09T15:28:28.583318+0000","last_became_peered":"2026-03-09T15:28:28.583318+0000","last_unstale":"2026-03-09T15:28:35.068852+0000","last_undegraded":"2026-03-09T15:28:35.068852+0000","last_fullsized":"2026-03-09T15:28:35.068852+0000","mapping_epoch":41,"log_start":"0'0","ondisk_log_start":"0'0","created":15,"last_epoch_clean":42,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-09T15:27:45.921333+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-09T15:27:45.921333+0000","last_clean_scrub_stamp":"2026-03-09T15:27:45.921333+0000","objects_scrubbed":0,"log_size":87,"ondisk_log_size":87,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-10T21:06:49.928859+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":
87,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":40,"seq":171798691844,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6064,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961360,"statfs":{"total":21470642176,"available":21464432640,"internally_reserved":0,"allocated":827392,"data_stored":573449,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.6140000000000001}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.5}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.7190000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.8160000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64100000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48899999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64800000000000002}]}]},{"osd":6,"up_from":35,"seq":150323855366,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6128,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961296,"statfs":{"total":21470642176,"available":21464367104,"internally_reserved":0,"allocated":827392,"data_stored":573449,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70399999999999996}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63300000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.88400000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64500000000000002}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.41899999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.40699999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.42699999999999999}]}]},{"osd":1,"up_from":11,"seq":47244640270,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6240,"kb_used_data":408,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961184,"statfs":{"total":21470642176,"available":21464252416,"internally_reserved":0,"allocated":417792,"data_stored":174979,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Mon Mar 9 15:28:40 2026","interfaces":[{"interface":"back","average":{"1min":0.53900000000000003,"5min":0.53900000000000003,"15min":0.53900000000000003},"min":{"1min":0.22700000000000001,"5min":0.22700000000000001,"15min":0.22700000000000001},"max":{"1min":0.78700000000000003,"5min":0.78700000000000003,"15min":0.78700000000000003},"last":0.78700000000000003},{"interface":"front","average":{"1min":0.499,"5min":0.499,"15min":0.499},"min":{"1min":0.223,"5min":0.223,"15min":0.223},"max":{"1min":0.72899999999999998,"5min":0.72899999999999998,"15min":0.72899999999999998},"last":0.64300000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77000000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68100000000000005}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77800000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.749}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65000000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63200000000000001}]}]},{"osd":0,"up_from":8,"seq":34359738384,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6704,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960720,"statfs":{"total":21470642176,"available":21463777280,"internally_reserved":0,"allocated":827392,"data_stored":573449,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.50600000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.45900000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.49299999999999999}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52500000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65600000000000003}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60499999999999998}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77700000000000002}]}]},{"osd":2,"up_from":14,"seq":60129542157,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6244,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961180,"statfs":{"total":21470642176,"available":21464248320,"internally_reserved":0,"allocated":421888,"data_stored":175365,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69399999999999995}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65300000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68000000000000005}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66600000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.627}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67300000000000004}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46800000000000003}]}]},{"osd":3,"up_from":20,"seq":85899345931,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5736,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961688,"statfs":{"total":21470642176,"available":21464768512,"internally_reserved":0,"allocated":425984,"data_stored":175609,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55300000000000005}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53700000000000003}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56499999999999995}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63400000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64500000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71499999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.628}]}]},{"osd":4,"up_from":23,"seq":98784247817,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5800,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961624,"statfs":{"total":21470642176,"available":21464702976,"internally_reserved":0,"allocated":425984,"data_stored":175609,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.79300000000000004}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81200000000000006}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66000000000000003}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67500000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71899999999999997}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77700000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73099999999999998}]}]},{"osd":5,"up_from":28,"seq":120259084296,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5732,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961692,"statfs":{"total":21470642176,"available":21464772608,"internally_reserved":0,"allocated":421888,"data_stored":175294,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.2509999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.468}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77800000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.113}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.94099999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82799999999999996}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.85199999999999998}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-09T15:28:46.072 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph pg dump --format=json 2026-03-09T15:28:46.227 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 
2026-03-09T15:28:46.545 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:28:46.546 INFO:teuthology.orchestra.run.vm05.stderr:dumped all 2026-03-09T15:28:46.616 INFO:teuthology.orchestra.run.vm05.stdout:{"pg_ready":true,"pg_map":{"version":8,"stamp":"2026-03-09T15:28:44.975890+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":3,"num_per_pool_omap_osds":3,"kb":167739392,"kb_used":48648,"kb_used_data":4488,"kb_used_omap":0,"kb_used_meta":44096,"kb_avail":167690744,"statfs":{"total":171765137408,"available":171715321856,"internally_reserved":0,"allocated":4595712,"data_stored":2597203,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":45154304},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"8.947897"},"pg_stats":[{"pgid":"1.0","version":"43'87","reported_
seq":56,"reported_epoch":43,"state":"active+clean","last_fresh":"2026-03-09T15:28:35.068852+0000","last_change":"2026-03-09T15:28:28.890681+0000","last_active":"2026-03-09T15:28:35.068852+0000","last_peered":"2026-03-09T15:28:35.068852+0000","last_clean":"2026-03-09T15:28:35.068852+0000","last_became_active":"2026-03-09T15:28:28.583318+0000","last_became_peered":"2026-03-09T15:28:28.583318+0000","last_unstale":"2026-03-09T15:28:35.068852+0000","last_undegraded":"2026-03-09T15:28:35.068852+0000","last_fullsized":"2026-03-09T15:28:35.068852+0000","mapping_epoch":41,"log_start":"0'0","ondisk_log_start":"0'0","created":15,"last_epoch_clean":42,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-03-09T15:27:45.921333+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-03-09T15:27:45.921333+0000","last_clean_scrub_stamp":"2026-03-09T15:27:45.921333+0000","objects_scrubbed":0,"log_size":87,"ondisk_log_size":87,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-03-10T21:06:49.928859+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,6],"acting":[7,0,6],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":459280,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":192,"num_read_kb":288,"num_write":133,"num_write_kb":1372,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":2,"num_bytes_recovered":397840,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1204224,"data_stored":1193520,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":
0},"log_size":87,"ondisk_log_size":87,"up":3,"acting":3,"num_store_stats":3}],"osd_stats":[{"osd":7,"up_from":40,"seq":171798691844,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6064,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5248,"kb_avail":20961360,"statfs":{"total":21470642176,"available":21464432640,"internally_reserved":0,"allocated":827392,"data_stored":573449,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5373952},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.6140000000000001}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.5}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.7190000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.8160000000000001}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64100000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.48899999999999999}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64800000000000002}]}]},{"osd":6,"up_from":35,"seq":150323855366,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6128,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961296,"statfs":{"total":21470642176,"available":21464367104,"internally_reserved":0,"allocated":827392,"data_stored":573449,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.70399999999999996}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63300000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.88400000000000001}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64500000000000002}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.41899999999999998}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.40699999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.42699999999999999}]}]},{"osd":1,"up_from":11,"seq":47244640270,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6240,"kb_used_data":408,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961184,"statfs":{"total":21470642176,"available":21464252416,"internally_reserved":0,"allocated":417792,"data_stored":174979,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Mon Mar 9 15:28:40 2026","interfaces":[{"interface":"back","average":{"1min":0.53900000000000003,"5min":0.53900000000000003,"15min":0.53900000000000003},"min":{"1min":0.22700000000000001,"5min":0.22700000000000001,"15min":0.22700000000000001},"max":{"1min":0.78700000000000003,"5min":0.78700000000000003,"15min":0.78700000000000003},"last":0.78700000000000003},{"interface":"front","average":{"1min":0.499,"5min":0.499,"15min":0.499},"min":{"1min":0.223,"5min":0.223,"15min":0.223},"max":{"1min":0.72899999999999998,"5min":0.72899999999999998,"15min":0.72899999999999998},"last":0.64300000000000002}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77000000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68100000000000005}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77800000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.749}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65000000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63200000000000001}]}]},{"osd":0,"up_from":8,"seq":34359738384,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20967424,"kb_used":6704,"kb_used_data":808,"kb_used_omap":0,"kb_used_meta":5888,"kb_avail":20960720,"statfs":{"total":21470642176,"available":21463777280,"internally_reserved":0,"allocated":827392,"data_stored":573449,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":6029312},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.50600000000000001}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.45900000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.49299999999999999}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.52500000000000002}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65600000000000003}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.60499999999999998}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77700000000000002}]}]},{"osd":2,"up_from":14,"seq":60129542157,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":6244,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5824,"kb_avail":20961180,"statfs":{"total":21470642176,"available":21464248320,"internally_reserved":0,"allocated":421888,"data_stored":175365,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5963776},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.69399999999999995}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.65300000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.68000000000000005}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66600000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.627}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67300000000000004}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.46800000000000003}]}]},{"osd":3,"up_from":20,"seq":85899345931,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5736,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961688,"statfs":{"total":21470642176,"available":21464768512,"internally_reserved":0,"allocated":425984,"data_stored":175609,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.55300000000000005}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.53700000000000003}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.56499999999999995}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.63400000000000001}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.64500000000000002}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71499999999999997}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.628}]}]},{"osd":4,"up_from":23,"seq":98784247817,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5800,"kb_used_data":416,"kb_used_omap":0,"kb_used_meta":5376,"kb_avail":20961624,"statfs":{"total":21470642176,"available":21464702976,"internally_reserved":0,"allocated":425984,"data_stored":175609,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5505024},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.79300000000000004}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.81200000000000006}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.66000000000000003}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.67500000000000004}]},{"osd":5,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.71899999999999997}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77700000000000002}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 
1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.73099999999999998}]}]},{"osd":5,"up_from":28,"seq":120259084296,"num_pgs":0,"num_osds":1,"num_per_pool_osds":0,"num_per_pool_omap_osds":0,"kb":20967424,"kb_used":5732,"kb_used_data":412,"kb_used_omap":0,"kb_used_meta":5312,"kb_avail":20961692,"statfs":{"total":21470642176,"available":21464772608,"internally_reserved":0,"allocated":421888,"data_stored":175294,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":5439488},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[{"osd":0,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.2509999999999999}]},{"osd":1,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.468}]},{"osd":2,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.77800000000000002}]},{"osd":3,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":1.113}]},{"osd":4,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.94099999999999995}]},{"osd":6,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.82799999999999996}]},{"osd":7,"last update":"Thu Jan 1 00:00:00 1970","interfaces":[{"interface":"back","average":{"1min":0,"5min":0,"15min":0},"min":{"1min":0,"5min":0,"15min":0},"max":{"1min":0,"5min":0,"15min":0},"last":0.85199999999999998}]}]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":6,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":401408,"data_stored":397840,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-03-09T15:28:46.616 INFO:tasks.cephadm.ceph_manager.ceph:clean! 2026-03-09T15:28:46.616 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 
2026-03-09T15:28:46.617 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-03-09T15:28:46.617 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph health --format=json 2026-03-09T15:28:46.731 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:46 vm09 ceph-mon[49358]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:46.764 INFO:teuthology.orchestra.run.vm05.stderr:Inferring config /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/mon.a/config 2026-03-09T15:28:46.804 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:46 vm05 ceph-mon[49764]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:46.804 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:46 vm05 ceph-mon[54361]: pgmap v8: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:47.032 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:46 vm09 systemd[1]: Starting Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:28:47.136 INFO:teuthology.orchestra.run.vm05.stdout: 2026-03-09T15:28:47.136 INFO:teuthology.orchestra.run.vm05.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-03-09T15:28:47.194 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-03-09T15:28:47.194 INFO:tasks.cephadm:Setup complete, yielding 2026-03-09T15:28:47.194 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-09T15:28:47.196 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm05.local 2026-03-09T15:28:47.196 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- bash -c 'ceph config set mgr mgr/cephadm/use_repo_digest false --force' 2026-03-09T15:28:47.311 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 podman[64055]: 2026-03-09 15:28:47.032093961 +0000 UTC m=+0.052475714 container create 238b579fef25670b2cf11a44acff7b59575447f29edb069a28074834e39c7d1c (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:28:47.311 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 podman[64055]: 2026-03-09 15:28:47.061552683 +0000 UTC m=+0.081934436 container init 238b579fef25670b2cf11a44acff7b59575447f29edb069a28074834e39c7d1c (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:28:47.311 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 podman[64055]: 2026-03-09 15:28:47.064213911 +0000 UTC m=+0.084595664 container start 238b579fef25670b2cf11a44acff7b59575447f29edb069a28074834e39c7d1c (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:28:47.311 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 bash[64055]: 238b579fef25670b2cf11a44acff7b59575447f29edb069a28074834e39c7d1c 2026-03-09T15:28:47.311 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 podman[64055]: 2026-03-09 15:28:46.99038662 +0000 UTC m=+0.010768382 image pull 514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d 
quay.io/prometheus/prometheus:v2.33.4 2026-03-09T15:28:47.311 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 systemd[1]: Started Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:28:47.311 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.093Z caller=main.go:475 level=info msg="No time or size retention was set so using the default time retention" duration=15d 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.093Z caller=main.go:512 level=info msg="Starting Prometheus" version="(version=2.33.4, branch=HEAD, revision=83032011a5d3e6102624fe58241a374a7201fee8)" 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.093Z caller=main.go:517 level=info build_context="(go=go1.17.7, user=root@d13bf69e7be8, date=20220222-16:51:28)" 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.093Z caller=main.go:518 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm09 (none))" 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.093Z caller=main.go:519 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.093Z caller=main.go:520 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.094Z caller=web.go:570 level=info component=web msg="Start listening for connections" address=:9095 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.094Z caller=main.go:923 level=info msg="Starting TSDB ..." 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.098Z caller=tls_config.go:195 level=info component=web msg="TLS is disabled." 
http2=false 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.098Z caller=head.go:493 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.098Z caller=head.go:527 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=20.488µs 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.098Z caller=head.go:533 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.099Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=0 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.099Z caller=head.go:610 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=96.721µs wal_replay_duration=166.722µs total_replay_duration=381.293µs 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.100Z caller=main.go:944 level=info fs_type=XFS_SUPER_MAGIC 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.100Z caller=main.go:947 level=info msg="TSDB started" 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.100Z caller=main.go:1128 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.110Z caller=main.go:1165 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=10.310205ms db_storage=501ns remote_storage=1.493µs web_handler=511ns query_engine=572ns scrape=441.616µs scrape_sd=15.629µs notify=330ns notify_sd=1.263µs rules=9.603342ms 2026-03-09T15:28:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:28:47 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:28:47.110Z caller=main.go:896 level=info msg="Server is ready to receive web requests." 
2026-03-09T15:28:47.732 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:47 vm05 ceph-mon[49764]: from='client.14550 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:28:47.732 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:47 vm05 ceph-mon[49764]: from='client.24427 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:28:47.732 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:47 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:47.732 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:47 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1558619569' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-09T15:28:47.733 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:47 vm05 ceph-mon[54361]: from='client.14550 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:28:47.733 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:47 vm05 ceph-mon[54361]: from='client.24427 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:28:47.733 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:47 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:47.733 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:47 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1558619569' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-09T15:28:47.772 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-09T15:28:47.774 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm05.local 2026-03-09T15:28:47.774 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin realm create --rgw-realm=r --default' 2026-03-09T15:28:48.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:47 vm09 ceph-mon[49358]: from='client.14550 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:28:48.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:47 vm09 ceph-mon[49358]: from='client.24427 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-03-09T15:28:48.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:47 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:48.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:47 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1558619569' entity='client.admin' cmd=[{"prefix": "health", "format": "json"}]: dispatch 2026-03-09T15:28:48.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:48 vm05 ceph-mon[49764]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:48.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:48 vm05 ceph-mon[49764]: Deploying daemon alertmanager.a on vm05 2026-03-09T15:28:48.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:48 vm05 ceph-mon[49764]: from='client.? 
' entity='client.admin' 2026-03-09T15:28:48.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:48 vm05 ceph-mon[54361]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:48.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:48 vm05 ceph-mon[54361]: Deploying daemon alertmanager.a on vm05 2026-03-09T15:28:48.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:48 vm05 ceph-mon[54361]: from='client.? ' entity='client.admin' 2026-03-09T15:28:49.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:48 vm09 ceph-mon[49358]: pgmap v9: 1 pgs: 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:49.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:48 vm09 ceph-mon[49358]: Deploying daemon alertmanager.a on vm05 2026-03-09T15:28:49.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:48 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' 2026-03-09T15:28:49.854 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:28:49.854 INFO:teuthology.orchestra.run.vm05.stdout: "id": "9d9a4289-6dc5-46ab-beb6-2c16bcc13948", 2026-03-09T15:28:49.854 INFO:teuthology.orchestra.run.vm05.stdout: "name": "r", 2026-03-09T15:28:49.854 INFO:teuthology.orchestra.run.vm05.stdout: "current_period": "44b69931-1b2d-40e0-8eef-9011af563413", 2026-03-09T15:28:49.854 INFO:teuthology.orchestra.run.vm05.stdout: "epoch": 1 2026-03-09T15:28:49.854 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:28:49.913 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin zonegroup create --rgw-zonegroup=default --master --default' 2026-03-09T15:28:49.959 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:49 vm05 ceph-mon[49764]: osdmap e44: 8 total, 8 up, 8 in 2026-03-09T15:28:49.959 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:49 vm05 ceph-mon[49764]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-09T15:28:49.959 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:49 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/557354448' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-09T15:28:49.959 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:49 vm05 ceph-mon[49764]: pgmap v11: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:49.960 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:49 vm05 ceph-mon[54361]: osdmap e44: 8 total, 8 up, 8 in 2026-03-09T15:28:49.960 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:49 vm05 ceph-mon[54361]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-09T15:28:49.960 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:49 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/557354448' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-09T15:28:49.960 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:49 vm05 ceph-mon[54361]: pgmap v11: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:50.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:49 vm09 ceph-mon[49358]: osdmap e44: 8 total, 8 up, 8 in 2026-03-09T15:28:50.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:49 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-09T15:28:50.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:49 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/557354448' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]: dispatch 2026-03-09T15:28:50.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:49 vm09 ceph-mon[49358]: pgmap v11: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:50.478 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:28:50.478 INFO:teuthology.orchestra.run.vm05.stdout: "id": "fdc8ac7a-9808-4f42-91d3-af6bc1bb42f5", 2026-03-09T15:28:50.478 INFO:teuthology.orchestra.run.vm05.stdout: "name": "default", 2026-03-09T15:28:50.478 INFO:teuthology.orchestra.run.vm05.stdout: "api_name": "default", 2026-03-09T15:28:50.478 INFO:teuthology.orchestra.run.vm05.stdout: "is_master": "true", 2026-03-09T15:28:50.478 INFO:teuthology.orchestra.run.vm05.stdout: "endpoints": [], 2026-03-09T15:28:50.478 INFO:teuthology.orchestra.run.vm05.stdout: "hostnames": [], 2026-03-09T15:28:50.478 INFO:teuthology.orchestra.run.vm05.stdout: "hostnames_s3website": [], 2026-03-09T15:28:50.478 INFO:teuthology.orchestra.run.vm05.stdout: "master_zone": "", 2026-03-09T15:28:50.478 INFO:teuthology.orchestra.run.vm05.stdout: "zones": [], 2026-03-09T15:28:50.478 INFO:teuthology.orchestra.run.vm05.stdout: "placement_targets": [], 2026-03-09T15:28:50.478 INFO:teuthology.orchestra.run.vm05.stdout: "default_placement": "", 2026-03-09T15:28:50.478 INFO:teuthology.orchestra.run.vm05.stdout: "realm_id": "9d9a4289-6dc5-46ab-beb6-2c16bcc13948", 2026-03-09T15:28:50.479 INFO:teuthology.orchestra.run.vm05.stdout: "sync_policy": { 2026-03-09T15:28:50.479 INFO:teuthology.orchestra.run.vm05.stdout: "groups": [] 2026-03-09T15:28:50.479 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:28:50.479 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:28:50.545 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default' 2026-03-09T15:28:50.818 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[49764]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-09T15:28:50.818 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[49764]: osdmap e45: 8 total, 8 up, 8 in 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[49764]: Deploying daemon grafana.a on vm09 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[54361]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[54361]: osdmap e45: 8 total, 8 up, 8 in 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:50.819 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:50 vm05 ceph-mon[54361]: Deploying daemon grafana.a on vm09 2026-03-09T15:28:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:50 vm09 ceph-mon[49358]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished 2026-03-09T15:28:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:50 vm09 ceph-mon[49358]: osdmap e45: 8 total, 8 up, 8 in 2026-03-09T15:28:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:50 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:50 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:50 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:50 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:50 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T15:28:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:50 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T15:28:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:50 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:50 vm09 ceph-mon[49358]: Deploying daemon grafana.a on vm09 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "id": "919e3a32-dcf2-4549-a997-dd2af445626c", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "name": "z", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "domain_root": "z.rgw.meta:root", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "control_pool": "z.rgw.control", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "gc_pool": "z.rgw.log:gc", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "lc_pool": "z.rgw.log:lc", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "log_pool": "z.rgw.log", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "intent_log_pool": "z.rgw.log:intent", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "usage_log_pool": "z.rgw.log:usage", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "roles_pool": "z.rgw.meta:roles", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "reshard_pool": "z.rgw.log:reshard", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "user_keys_pool": "z.rgw.meta:users.keys", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "user_email_pool": "z.rgw.meta:users.email", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "user_swift_pool": "z.rgw.meta:users.swift", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "user_uid_pool": "z.rgw.meta:users.uid", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "otp_pool": "z.rgw.otp", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "system_key": { 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "access_key": "", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "secret_key": "" 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:28:51.109 
INFO:teuthology.orchestra.run.vm05.stdout: "placement_pools": [ 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: { 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "key": "default-placement", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "val": { 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "index_pool": "z.rgw.buckets.index", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "storage_classes": { 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "STANDARD": { 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "data_pool": "z.rgw.buckets.data" 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "data_extra_pool": "z.rgw.buckets.non-ec", 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: "index_type": 0 2026-03-09T15:28:51.109 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:28:51.110 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:28:51.110 INFO:teuthology.orchestra.run.vm05.stdout: ], 2026-03-09T15:28:51.110 INFO:teuthology.orchestra.run.vm05.stdout: "realm_id": "9d9a4289-6dc5-46ab-beb6-2c16bcc13948", 2026-03-09T15:28:51.110 INFO:teuthology.orchestra.run.vm05.stdout: "notif_pool": "z.rgw.log:notif" 2026-03-09T15:28:51.110 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:28:51.154 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'radosgw-admin period update --rgw-realm=r --commit' 2026-03-09T15:28:51.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:51 vm05 ceph-mon[49764]: osdmap e46: 8 total, 8 up, 8 in 2026-03-09T15:28:51.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:51 vm05 ceph-mon[49764]: pgmap v14: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:51.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:51 vm05 ceph-mon[54361]: osdmap e46: 8 total, 8 up, 8 in 2026-03-09T15:28:51.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:51 vm05 ceph-mon[54361]: pgmap v14: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:52.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:51 vm09 ceph-mon[49358]: osdmap e46: 8 total, 8 up, 8 in 2026-03-09T15:28:52.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:51 vm09 ceph-mon[49358]: pgmap v14: 33 pgs: 32 unknown, 1 active+clean; 449 KiB data, 48 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:28:52.735 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:28:52 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[72090]: level=info ts=2026-03-09T15:28:52.482Z caller=cluster.go:696 component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000510959s 2026-03-09T15:28:54.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:53 vm09 ceph-mon[49358]: osdmap e47: 8 total, 8 up, 8 in 2026-03-09T15:28:54.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:53 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/2750208628' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-09T15:28:54.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:53 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-09T15:28:54.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:53 vm09 ceph-mon[49358]: pgmap v16: 65 pgs: 32 unknown, 33 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 2.6 KiB/s wr, 4 op/s 2026-03-09T15:28:54.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:53 vm05 ceph-mon[49764]: osdmap e47: 8 total, 8 up, 8 in 2026-03-09T15:28:54.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:53 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2750208628' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-09T15:28:54.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:53 vm05 ceph-mon[49764]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-09T15:28:54.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:53 vm05 ceph-mon[49764]: pgmap v16: 65 pgs: 32 unknown, 33 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 2.6 KiB/s wr, 4 op/s 2026-03-09T15:28:54.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:53 vm05 ceph-mon[54361]: osdmap e47: 8 total, 8 up, 8 in 2026-03-09T15:28:54.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:53 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2750208628' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-09T15:28:54.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:53 vm05 ceph-mon[54361]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]: dispatch 2026-03-09T15:28:54.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:53 vm05 ceph-mon[54361]: pgmap v16: 65 pgs: 32 unknown, 33 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 2.6 KiB/s wr, 4 op/s 2026-03-09T15:28:55.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:54 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-09T15:28:55.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:54 vm09 ceph-mon[49358]: osdmap e48: 8 total, 8 up, 8 in 2026-03-09T15:28:55.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:54 vm09 ceph-mon[49358]: osdmap e49: 8 total, 8 up, 8 in 2026-03-09T15:28:55.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:54 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2750208628' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-09T15:28:55.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:54 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-09T15:28:55.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:54 vm05 ceph-mon[49764]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-09T15:28:55.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:54 vm05 ceph-mon[49764]: osdmap e48: 8 total, 8 up, 8 in 2026-03-09T15:28:55.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:54 vm05 ceph-mon[49764]: osdmap e49: 8 total, 8 up, 8 in 2026-03-09T15:28:55.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:54 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2750208628' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-09T15:28:55.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:54 vm05 ceph-mon[49764]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-09T15:28:55.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:54 vm05 ceph-mon[54361]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.log","app": "rgw"}]': finished 2026-03-09T15:28:55.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:54 vm05 ceph-mon[54361]: osdmap e48: 8 total, 8 up, 8 in 2026-03-09T15:28:55.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:54 vm05 ceph-mon[54361]: osdmap e49: 8 total, 8 up, 8 in 2026-03-09T15:28:55.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:54 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2750208628' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-09T15:28:55.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:54 vm05 ceph-mon[54361]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]: dispatch 2026-03-09T15:28:56.391 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:56 vm05 ceph-mon[49764]: pgmap v19: 97 pgs: 64 unknown, 33 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 2.6 KiB/s wr, 4 op/s 2026-03-09T15:28:56.392 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:56 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:56.392 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:56 vm05 ceph-mon[49764]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-09T15:28:56.392 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:56 vm05 ceph-mon[49764]: osdmap e50: 8 total, 8 up, 8 in 2026-03-09T15:28:56.392 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:56 vm05 ceph-mon[54361]: pgmap v19: 97 pgs: 64 unknown, 33 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 2.6 KiB/s wr, 4 op/s 2026-03-09T15:28:56.392 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:56 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:56.392 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:56 vm05 ceph-mon[54361]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-09T15:28:56.392 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:56 vm05 ceph-mon[54361]: osdmap e50: 8 total, 8 up, 8 in 2026-03-09T15:28:56.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:56 vm09 ceph-mon[49358]: pgmap v19: 97 pgs: 64 unknown, 33 active+clean; 449 KiB data, 49 MiB used, 160 GiB / 160 GiB avail; 1.4 KiB/s rd, 2.6 KiB/s wr, 4 op/s 2026-03-09T15:28:56.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:56 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:28:56.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:56 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.control","app": "rgw"}]': finished 2026-03-09T15:28:56.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:56 vm09 ceph-mon[49358]: osdmap e50: 8 total, 8 up, 8 in 2026-03-09T15:28:56.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:28:56 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:28:56] "GET /metrics HTTP/1.1" 200 192168 "" "Prometheus/2.33.4" 2026-03-09T15:28:58.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:57 vm09 ceph-mon[49358]: osdmap e51: 8 total, 8 up, 8 in 2026-03-09T15:28:58.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:57 vm09 ceph-mon[49358]: pgmap v22: 97 pgs: 11 creating+activating, 86 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 1.2 KiB/s wr, 7 op/s 2026-03-09T15:28:58.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:57 vm05 ceph-mon[49764]: osdmap e51: 8 total, 8 up, 8 in 2026-03-09T15:28:58.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:57 vm05 ceph-mon[49764]: pgmap v22: 97 pgs: 11 creating+activating, 86 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 1.2 KiB/s wr, 7 op/s 2026-03-09T15:28:58.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:57 vm05 ceph-mon[54361]: osdmap e51: 8 total, 8 up, 8 in 2026-03-09T15:28:58.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:57 vm05 ceph-mon[54361]: pgmap v22: 97 pgs: 11 creating+activating, 86 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 1.2 KiB/s wr, 7 op/s 2026-03-09T15:28:58.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:28:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:28:58] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:28:58.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:58 vm09 ceph-mon[49358]: osdmap e52: 8 total, 8 up, 8 in 2026-03-09T15:28:58.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:58 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2374297859' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-09T15:28:59.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:58 vm05 ceph-mon[49764]: osdmap e52: 8 total, 8 up, 8 in 2026-03-09T15:28:59.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:58 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/2374297859' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-09T15:28:59.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:58 vm05 ceph-mon[54361]: osdmap e52: 8 total, 8 up, 8 in 2026-03-09T15:28:59.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:58 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2374297859' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]: dispatch 2026-03-09T15:29:00.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:59 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2374297859' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-09T15:29:00.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:59 vm09 ceph-mon[49358]: osdmap e53: 8 total, 8 up, 8 in 2026-03-09T15:29:00.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:59 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2374297859' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-09T15:29:00.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:59 vm09 ceph-mon[49358]: pgmap v25: 129 pgs: 32 unknown, 11 creating+activating, 86 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 1.2 KiB/s wr, 7 op/s 2026-03-09T15:29:00.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:59 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2374297859' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-09T15:29:00.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:59 vm09 ceph-mon[49358]: osdmap e54: 8 total, 8 up, 8 in 2026-03-09T15:29:00.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:28:59 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2374297859' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2374297859' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[49764]: osdmap e53: 8 total, 8 up, 8 in 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2374297859' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[49764]: pgmap v25: 129 pgs: 32 unknown, 11 creating+activating, 86 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 1.2 KiB/s wr, 7 op/s 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/2374297859' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[49764]: osdmap e54: 8 total, 8 up, 8 in 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2374297859' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2374297859' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "z.rgw.meta","app": "rgw"}]': finished 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[54361]: osdmap e53: 8 total, 8 up, 8 in 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2374297859' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]: dispatch 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[54361]: pgmap v25: 129 pgs: 32 unknown, 11 creating+activating, 86 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 1.2 KiB/s wr, 7 op/s 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2374297859' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[54361]: osdmap e54: 8 total, 8 up, 8 in 2026-03-09T15:29:00.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:28:59 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/2374297859' entity='client.admin' cmd=[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]: dispatch 2026-03-09T15:29:00.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:00 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[72090]: level=info ts=2026-03-09T15:29:00.485Z caller=cluster.go:688 component=cluster msg="gossip settled; proceeding" elapsed=10.003624089s 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "id": "849d8414-8b9d-43cc-a491-e44137271a7d", 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "epoch": 1, 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "predecessor_uuid": "44b69931-1b2d-40e0-8eef-9011af563413", 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "sync_status": [], 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "period_map": { 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "id": "849d8414-8b9d-43cc-a491-e44137271a7d", 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "zonegroups": [ 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: { 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "id": "fdc8ac7a-9808-4f42-91d3-af6bc1bb42f5", 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "name": "default", 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "api_name": "default", 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "is_master": "true", 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "endpoints": [], 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "hostnames": [], 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "hostnames_s3website": [], 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "master_zone": "919e3a32-dcf2-4549-a997-dd2af445626c", 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "zones": [ 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: { 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "id": "919e3a32-dcf2-4549-a997-dd2af445626c", 2026-03-09T15:29:01.094 INFO:teuthology.orchestra.run.vm05.stdout: "name": "z", 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "endpoints": [], 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "log_meta": "false", 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "log_data": "false", 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "bucket_index_max_shards": 11, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "read_only": "false", 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "tier_type": "", 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "sync_from_all": "true", 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "sync_from": [], 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "redirect_zone": "" 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: ], 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "placement_targets": [ 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: { 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "name": "default-placement", 
2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "tags": [], 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "storage_classes": [ 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "STANDARD" 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: ] 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: ], 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "default_placement": "default-placement", 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "realm_id": "9d9a4289-6dc5-46ab-beb6-2c16bcc13948", 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "sync_policy": { 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "groups": [] 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: ], 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "short_zone_ids": [ 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: { 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "key": "919e3a32-dcf2-4549-a997-dd2af445626c", 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "val": 1790742450 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: ] 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "master_zonegroup": "fdc8ac7a-9808-4f42-91d3-af6bc1bb42f5", 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "master_zone": "919e3a32-dcf2-4549-a997-dd2af445626c", 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "period_config": { 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "bucket_quota": { 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "enabled": false, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "check_on_raw": false, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "max_size": -1, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "max_size_kb": 0, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "max_objects": -1 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "user_quota": { 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "enabled": false, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "check_on_raw": false, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "max_size": -1, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "max_size_kb": 0, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "max_objects": -1 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "user_ratelimit": { 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "max_read_ops": 0, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "max_write_ops": 0, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "max_read_bytes": 0, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "max_write_bytes": 0, 
2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "enabled": false 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "bucket_ratelimit": { 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "max_read_ops": 0, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "max_write_ops": 0, 2026-03-09T15:29:01.095 INFO:teuthology.orchestra.run.vm05.stdout: "max_read_bytes": 0, 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: "max_write_bytes": 0, 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: "enabled": false 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: "anonymous_ratelimit": { 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: "max_read_ops": 0, 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: "max_write_ops": 0, 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: "max_read_bytes": 0, 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: "max_write_bytes": 0, 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: "enabled": false 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: "realm_id": "9d9a4289-6dc5-46ab-beb6-2c16bcc13948", 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: "realm_name": "r", 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout: "realm_epoch": 2 2026-03-09T15:29:01.096 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:29:01.140 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply rgw foo --realm r --zone z --placement=2 --port=8000' 2026-03-09T15:29:01.649 INFO:teuthology.orchestra.run.vm05.stdout:Scheduled rgw.foo update... 2026-03-09T15:29:01.717 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply rgw smpl' 2026-03-09T15:29:01.914 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:01 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/2374297859' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-09T15:29:01.915 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:01 vm05 ceph-mon[54361]: osdmap e55: 8 total, 8 up, 8 in 2026-03-09T15:29:01.915 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:01 vm05 ceph-mon[54361]: pgmap v28: 129 pgs: 32 unknown, 11 creating+activating, 86 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:29:01.915 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:01 vm05 ceph-mon[54361]: from='client.14622 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:29:01.915 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:01 vm05 ceph-mon[54361]: Saving service rgw.foo spec with placement count:2 2026-03-09T15:29:01.915 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:01 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:01.915 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:01 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2374297859' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-09T15:29:01.915 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:01 vm05 ceph-mon[49764]: osdmap e55: 8 total, 8 up, 8 in 2026-03-09T15:29:01.915 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:01 vm05 ceph-mon[49764]: pgmap v28: 129 pgs: 32 unknown, 11 creating+activating, 86 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:29:01.915 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:01 vm05 ceph-mon[49764]: from='client.14622 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:29:01.915 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:01 vm05 ceph-mon[49764]: Saving service rgw.foo spec with placement count:2 2026-03-09T15:29:01.915 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:01 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:02.060 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:01 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/2374297859' entity='client.admin' cmd='[{"prefix": "osd pool set", "pool": "z.rgw.meta", "var": "pg_num_min", "val": "8"}]': finished 2026-03-09T15:29:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:01 vm09 ceph-mon[49358]: osdmap e55: 8 total, 8 up, 8 in 2026-03-09T15:29:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:01 vm09 ceph-mon[49358]: pgmap v28: 129 pgs: 32 unknown, 11 creating+activating, 86 active+clean; 451 KiB data, 50 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:29:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:01 vm09 ceph-mon[49358]: from='client.14622 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "foo", "realm": "r", "zone": "z", "placement": "2", "port": 8000, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:29:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:01 vm09 ceph-mon[49358]: Saving service rgw.foo spec with placement count:2 2026-03-09T15:29:02.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:01 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:02.470 INFO:teuthology.orchestra.run.vm05.stdout:Scheduled rgw.smpl update... 2026-03-09T15:29:02.543 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph osd pool create foo' 2026-03-09T15:29:03.667 INFO:teuthology.orchestra.run.vm05.stderr:pool 'foo' created 2026-03-09T15:29:03.715 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'rbd pool init foo' 2026-03-09T15:29:03.903 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:03 vm09 ceph-mon[49358]: from='client.24502 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "smpl", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:29:03.903 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:03 vm09 ceph-mon[49358]: Saving service rgw.smpl spec with placement count:2 2026-03-09T15:29:03.903 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:03 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:03.903 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:03 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2635984443' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-09T15:29:03.903 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:03 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-09T15:29:03.906 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:03 vm05 ceph-mon[49764]: from='client.24502 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "smpl", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:29:03.906 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:03 vm05 ceph-mon[49764]: Saving service rgw.smpl spec with placement count:2 2026-03-09T15:29:03.906 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:03 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:03.906 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:03 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/2635984443' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-09T15:29:03.906 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:03 vm05 ceph-mon[49764]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-09T15:29:03.906 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:03 vm05 ceph-mon[54361]: from='client.24502 -' entity='client.admin' cmd=[{"prefix": "orch apply rgw", "svc_id": "smpl", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:29:03.906 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:03 vm05 ceph-mon[54361]: Saving service rgw.smpl spec with placement count:2 2026-03-09T15:29:03.906 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:03 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:03.906 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:03 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2635984443' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-09T15:29:03.906 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:03 vm05 ceph-mon[54361]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool create", "pool": "foo"}]: dispatch 2026-03-09T15:29:04.276 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 systemd[1]: Starting Ceph grafana.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 podman[64334]: 2026-03-09 15:29:04.276181771 +0000 UTC m=+0.015496106 container create 3727d7279dc9a551fdbc1f0edbf538c16f8a9429809ea4974c28c752717fe905 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.buildah.version=1.24.2, com.redhat.component=ubi8-container, distribution-scope=public, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., version=8.5, summary=Grafana Container configured for Ceph mgr/dashboard integration, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, name=ubi8, maintainer=Paul Cuzner , architecture=x86_64, vcs-type=git, release=236.1648460182, vendor=Red Hat, Inc., io.openshift.expose-services=, io.k8s.display-name=Red Hat Universal Base Image 8, description=Ceph Grafana Container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.openshift.tags=base rhel8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, build-date=2022-03-28T10:36:18.413762) 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 podman[64334]: 2026-03-09 15:29:04.323645771 +0000 UTC m=+0.062960115 container init 3727d7279dc9a551fdbc1f0edbf538c16f8a9429809ea4974c28c752717fe905 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a, build-date=2022-03-28T10:36:18.413762, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=ubi8-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, release=236.1648460182, name=ubi8, maintainer=Paul Cuzner , io.k8s.display-name=Red Hat Universal Base Image 8, io.openshift.tags=base rhel8, distribution-scope=public, architecture=x86_64, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., description=Ceph Grafana Container, version=8.5, vcs-type=git, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.openshift.expose-services=, io.buildah.version=1.24.2, vendor=Red Hat, Inc., vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, summary=Grafana Container configured for Ceph mgr/dashboard integration) 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 podman[64334]: 2026-03-09 15:29:04.326542639 +0000 UTC m=+0.065856974 container start 3727d7279dc9a551fdbc1f0edbf538c16f8a9429809ea4974c28c752717fe905 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a, io.buildah.version=1.24.2, com.redhat.component=ubi8-container, io.k8s.display-name=Red Hat Universal Base Image 8, description=Ceph Grafana Container, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, summary=Grafana Container configured for Ceph mgr/dashboard integration, distribution-scope=public, vcs-type=git, maintainer=Paul Cuzner , version=8.5, build-date=2022-03-28T10:36:18.413762, release=236.1648460182, vendor=Red Hat, Inc., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.expose-services=, io.openshift.tags=base rhel8, architecture=x86_64, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, name=ubi8) 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 bash[64334]: 3727d7279dc9a551fdbc1f0edbf538c16f8a9429809ea4974c28c752717fe905 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 podman[64334]: 2026-03-09 15:29:04.270083153 +0000 UTC m=+0.009397488 image pull dad864ee21e98e69f4029d1e417aa085001566be0d322fbc75bc6f29b0050c01 quay.io/ceph/ceph-grafana:8.3.5 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 systemd[1]: Started Ceph grafana.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="The state of unified alerting is still not defined. The decision will be made during as we run the database migrations" logger=settings 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=warn msg="falling back to legacy setting of 'min_interval_seconds'; please use the configuration option in the `unified_alerting` section if Grafana 8 alerts are enabled." logger=settings 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Config loaded from" logger=settings file=/usr/share/grafana/conf/defaults.ini 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Config loaded from" logger=settings file=/etc/grafana/grafana.ini 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_DATA=/var/lib/grafana" 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_LOGS=/var/log/grafana" 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins" 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning" 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Path Home" logger=settings path=/usr/share/grafana 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: 
t=2026-03-09T15:29:04+0000 lvl=info msg="Path Data" logger=settings path=/var/lib/grafana 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Path Logs" logger=settings path=/var/log/grafana 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Path Plugins" logger=settings path=/var/lib/grafana/plugins 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Path Provisioning" logger=settings path=/etc/grafana/provisioning 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="App mode production" logger=settings 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Connecting to DB" logger=sqlstore dbtype=sqlite3 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=warn msg="SQLite database file has broader permissions than it should" logger=sqlstore path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r----- 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Starting DB migrations" logger=migrator 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create migration_log table" 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create user table" 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user.login" 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user.email" 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_user_login - v1" 2026-03-09T15:29:04.533 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_user_email - v1" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table user to user_v1 - v1" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create user table v2" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_user_login - v2" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_user_email - v2" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="copy data_source v1 to v2" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table user_v1" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column help_flags1 to user table" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update user table charset" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add last_seen_at column to user" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add missing user data" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add is_disabled column to user" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add index user.login/user.email" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add is_service_account column to user" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create temp user 
table v1-7" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_email - v1-7" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_org_id - v1-7" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_code - v1-7" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_status - v1-7" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update temp_user table charset" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_email - v1" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_org_id - v1" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_code - v1" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_temp_user_status - v1" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table temp_user to temp_user_tmp_qwerty - v1" 2026-03-09T15:29:04.534 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create temp_user v2" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_email - v2" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_org_id - v2" 2026-03-09T15:29:04.535 
INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_code - v2" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_temp_user_status - v2" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="copy temp_user v1 to v2" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop temp_user_tmp_qwerty" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Set created for temp users that will otherwise prematurely expire" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create star table" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index star.user_id_dashboard_id" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create org table v1" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_org_name - v1" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create org_user table v1" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_org_user_org_id - v1" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_org_user_org_id_user_id - v1" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_org_user_user_id - v1" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update org table charset" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update org_user table charset" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Migrate all Read Only Viewers to Viewers" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard table" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard.account_id" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_account_id_slug" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_tag table" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_tag.dasboard_id_term" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_dashboard_tag_dashboard_id_term - v1" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard to dashboard_v1 - v1" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard v2" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_org_id - v2" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_org_id_slug - v2" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: 
t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard v1 to v2" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_v1" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard.data to mediumtext v1" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column updated_by in dashboard - v2" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column created_by in dashboard - v2" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column gnetId in dashboard" 2026-03-09T15:29:04.535 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for gnetId in dashboard" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_id in dashboard" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for plugin_id in dashboard" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_id in dashboard_tag" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard table charset" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_tag table charset" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column folder_id in dashboard" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column isFolder in 
dashboard" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column has_acl in dashboard" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in dashboard" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in dashboard" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index dashboard_org_id_uid" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index org_id_slug" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard title length" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index for dashboard_org_id_title_folder_id" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning" 2026-03-09T15:29:04.536 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table dashboard_provisioning to dashboard_provisioning_tmp_qwerty - v1" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_provisioning v2" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id - v2" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_provisioning_dashboard_id_name - v2" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="copy dashboard_provisioning 
v1 to v2" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop dashboard_provisioning_tmp_qwerty" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add check_sum column" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_title" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="delete tags for deleted dashboards" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="delete stars for deleted dashboards" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for dashboard_is_folder" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index data_source.account_id" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index data_source.account_id_name" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_data_source_account_id - v1" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_data_source_account_id_name - v1" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table data_source to data_source_v1 - v1" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create data_source table v2" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 
15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_data_source_org_id - v2" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_data_source_org_id_name - v2" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="copy data_source v1 to v2" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table data_source_v1 #2" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column with_credentials" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add secure json data column" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update data_source table charset" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update initial version to 1" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add read_only data column" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Migrate logging ds to loki ds" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update json_data with nulls" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add uid column" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid value" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index 
datasource_org_id_uid" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index datasource_org_id_is_default" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.key" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index api_key.account_id_name" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_api_key_account_id - v1" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_key - v1" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_api_key_account_id_name - v1" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table api_key to api_key_v1 - v1" 2026-03-09T15:29:04.537 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create api_key table v2" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_api_key_org_id - v2" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_key - v2" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_api_key_org_id_name - v2" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 
vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="copy api_key v1 to v2" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table api_key_v1" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update api_key table charset" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add expires to api_key table" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add service account foreign key" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v4" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop table dashboard_snapshot_v4 #1" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_snapshot table v5 #2" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_key - v5" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_dashboard_snapshot_delete_key - v5" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_dashboard_snapshot_user_id - v5" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard_snapshot to mediumtext v2" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update dashboard_snapshot table charset" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: 
t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column external_delete_url to dashboard_snapshots table" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add encrypted dashboard json column" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Change dashboard_encrypted column to MEDIUMBLOB" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create quota table v1" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_quota_org_id_user_id_target - v1" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update quota table charset" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create plugin_setting table" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_plugin_setting_org_id_plugin_id - v1" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column plugin_version to plugin_settings" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update plugin_setting table charset" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create session table" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist table" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old table playlist_item table" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing 
migration" logger=migrator id="create playlist table v2" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create playlist item table v2" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist table charset" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update playlist_item table charset" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v2" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop preferences table v3" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create preferences table v3" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update preferences table charset" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column team_id in preferences" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update team_id column values in preferences" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column week_start in preferences" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create alert table v1" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert org_id & id " 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert state" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert dashboard_id" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Create alert_rule_tag table v1" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_rule_tag.alert_id_tag_id" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_alert_rule_tag_alert_id_tag_id - v1" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table alert_rule_tag to alert_rule_tag_v1 - v1" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Create alert_rule_tag table v2" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_alert_rule_tag_alert_id_tag_id - Add unique index alert_rule_tag.alert_id_tag_id V2" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="copy alert_rule_tag v1 to v2" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop table alert_rule_tag_v1" 2026-03-09T15:29:04.538 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_notification table v1" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column is_default" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column frequency" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column send_reminder" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column disable_resolve_message" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification org_id & name" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert table charset" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert_notification table charset" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create notification_journal table v1" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index notification_journal org_id & alert_id & notifier_id" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_notification_journal" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_notification_state table v1" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index alert_notification_state org_id & alert_id & notifier_id" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add for to alert table" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column uid in alert_notification" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update uid column values in alert_notification" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index alert_notification_org_id_uid" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index org_id_name" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column secure_settings in alert_notification" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert.settings to mediumtext" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add non-unique index alert_notification_state_alert_id" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add non-unique index alert_rule_tag_alert_id" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Drop old annotation table v4" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create annotation table v5" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 0 v3" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 1 v3" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 2 v3" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 3 v3" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index annotation 4 v3" 2026-03-09T15:29:04.539 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update annotation table charset" 2026-03-09T15:29:04.540 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing 
migration" logger=migrator id="Add column region_id to annotation table" 2026-03-09T15:29:04.540 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Drop category_id index" 2026-03-09T15:29:04.540 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column tags to annotation table" 2026-03-09T15:29:04.540 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag table v2" 2026-03-09T15:29:04.540 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add unique index annotation_tag.annotation_id_tag_id" 2026-03-09T15:29:04.540 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index UQE_annotation_tag_annotation_id_tag_id - v2" 2026-03-09T15:29:04.540 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table annotation_tag to annotation_tag_v2 - v2" 2026-03-09T15:29:04.540 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Create annotation_tag table v3" 2026-03-09T15:29:04.540 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index UQE_annotation_tag_annotation_id_tag_id - Add unique index annotation_tag.annotation_id_tag_id V3" 2026-03-09T15:29:04.540 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="copy annotation_tag v2 to v3" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop table annotation_tag_v2" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Update alert annotations and set TEXT to empty" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add created time to annotation table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: 
t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add updated time to annotation table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for created in annotation table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for updated in annotation table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Convert existing annotations from seconds to milliseconds" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add epoch_end column" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for epoch_end" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Make epoch_end the same as epoch" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Move region to single row" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch from annotation table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_dashboard_id_panel_id_epoch from annotation table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_dashboard_id_epoch_end_epoch on annotation table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for org_id_epoch_end_epoch on annotation table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Remove index org_id_epoch_epoch_end from annotation table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add index for alert_id on annotation table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create test_data table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard_version table v1" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_version.dashboard_id" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_version.dashboard_id and dashboard_version.version" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Set dashboard version to 1 where 0" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="save existing dashboard data in dashboard_version table v1" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="alter dashboard_version.data to mediumtext v1" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create team table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index team.org_id" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_org_id_name" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create team member table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.org_id" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: 
t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_member_org_id_team_id_user_id" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_member.team_id" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column email to team table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column external to team_member table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column permission to team_member table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create dashboard acl table" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_dashboard_id" 2026-03-09T15:29:04.541 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_user_id" 2026-03-09T15:29:04.542 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index dashboard_acl_dashboard_id_team_id" 2026-03-09T15:29:04.542 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_user_id" 2026-03-09T15:29:04.542 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_team_id" 2026-03-09T15:29:04.542 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_acl_org_id_role" 2026-03-09T15:29:04.542 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index dashboard_permission" 2026-03-09T15:29:04.542 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing 
migration" logger=migrator id="save default acl rules in dashboard_acl table" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="delete acl rules for deleted dashboards and folders" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create tag table" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index tag.key_value" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create login attempt table" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index login_attempt.username" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index IDX_login_attempt_username - v1" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Rename table login_attempt to login_attempt_tmp_qwerty - v1" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create login_attempt v2" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_login_attempt_username - v2" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="copy login_attempt v1 to v2" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop login_attempt_tmp_qwerty" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth table" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create index IDX_user_auth_auth_module_auth_id - v1" 
2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="alter user_auth.auth_id to length 190" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth access token to user_auth" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth refresh token to user_auth" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth token type to user_auth" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add OAuth expiry to user_auth" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add index to user_id column in user_auth" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create server_lock table" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index server_lock.operation_uid" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create user auth token table" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.auth_token" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_auth_token.prev_auth_token" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_auth_token.user_id" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add revoked_at to the user auth token" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 
15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create cache_data table" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index cache_data.cache_key" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create short_url table v1" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index short_url.org_id-uid" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition table" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="recreate alert_definition table" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on org_id and title columns" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition on org_id and uid columns" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition table data column to mediumtext in mysql" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and title columns" 2026-03-09T15:29:04.807 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop index in alert_definition on org_id and uid columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and title columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index in alert_definition on org_id and uid columns" 
2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column paused in alert_definition" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition table" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="delete alert_definition_version table" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="recreate alert_definition_version table" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition_version table on alert_definition_id and version columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_definition_version table on alert_definition_uid and version columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_definition_version table data column to mediumtext in mysql" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="drop alert_definition_version table" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_instance table" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, def_uid and current_state columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_instance table on def_org_id, current_state columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add column current_state_end to alert_instance" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="remove index def_org_id, def_uid, current_state on alert_instance" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="remove index def_org_id, current_state on alert_instance" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_org_id to rule_org_id in alert_instance" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="rename def_uid to rule_uid in alert_instance" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, rule_uid, current_state on alert_instance" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index rule_org_id, current_state on alert_instance" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule table" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and title columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id and uid columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespace_uid, group_uid columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule table data column to mediumtext in mysql" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add column 
annotations to alert_rule" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="remove unique index from alert_rule on org_id, title columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, namespase_uid and title columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add dashboard_uid column to alert_rule" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add panel_id column to alert_rule" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule on org_id, dashboard_uid and panel_id columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create alert_rule_version table" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_uid and version columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_rule_version table on rule_org_id, rule_namespace_uid and rule_group columns" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="alter alert_rule_version table data column to mediumtext in mysql" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add column for to alert_rule_version" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add column annotations to alert_rule_version" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add column labels to alert_rule_version" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id=create_alert_configuration_table 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column default in alert_configuration" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="alert alert_configuration alertmanager_configuration column from TEXT to MEDIUMTEXT if mysql" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add column org_id in alert_configuration" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in alert_configuration table on org_id column" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id=create_ngalert_configuration_table 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index in ngalert_configuration on org_id column" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="clear migration entry \"remove unified alerting data\"" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="move dashboard alerts to unified alerting" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element table v1" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element org_id-folder_id-name-kind" 2026-03-09T15:29:04.808 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create library_element_connection table v1" 2026-03-09T15:29:04.808 
INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index library_element_connection element_id-kind-connection_id" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index library_element org_id_uid" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="clone move dashboard alerts to unified alerting" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create data_keys table" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create kv_store table v1" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index kv_store.org_id-namespace-key" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="update dashboard_uid and panel_id from existing annotations" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create permission table" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index permission.role_id" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_id_action_scope" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create role table" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add column display_name" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add column group_name" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index role.org_id" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role_org_id_name" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index role_org_id_uid" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create team role table" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.org_id" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index team_role_org_id_team_id_role_id" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index team_role.team_id" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create user role table" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.org_id" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index user_role_org_id_user_id_role_id" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index user_role.user_id" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create builtin role table" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.role_id" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index 
builtin_role.name" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Add column org_id to builtin_role table" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add index builtin_role.org_id" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index builtin_role_org_id_role_id_role" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="Remove unique index role_org_id_uid" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index role.uid" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="create seed assignment table" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Executing migration" logger=migrator id="add unique index builtin_role_role_name" 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="migrations completed" logger=migrator performed=381 skipped=0 duration=283.246694ms 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Created default organization" logger=sqlstore 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Initialising plugins" logger=plugin.manager 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=input 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=grafana-piechart-panel 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=vonage-status-panel 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: 
t=2026-03-09T15:29:04+0000 lvl=info msg="Live Push Gateway initialization" logger=live.push_http 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=warn msg="[Deprecated] the datasource provisioning config is outdated. please upgrade" logger=provisioning.datasources filename=/etc/grafana/provisioning/datasources/ceph-dashboard.yml 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="HTTP Server Listen" logger=http.server address=[::]:3000 protocol=https subUrl= socket= 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="warming cache for startup" logger=ngalert 2026-03-09T15:29:04.809 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:29:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:29:04+0000 lvl=info msg="starting MultiOrg Alertmanager" logger=ngalert.multiorg.alertmanager 2026-03-09T15:29:04.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:04 vm09 ceph-mon[49358]: pgmap v29: 129 pgs: 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1.2 KiB/s wr, 3 op/s 2026-03-09T15:29:04.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:04 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-09T15:29:04.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:04 vm09 ceph-mon[49358]: osdmap e56: 8 total, 8 up, 8 in 2026-03-09T15:29:04.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:04 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3793439051' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-09T15:29:04.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:04 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-09T15:29:04.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:04 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:04.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:04 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:04.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:04 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[49764]: pgmap v29: 129 pgs: 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1.2 KiB/s wr, 3 op/s 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[49764]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[49764]: osdmap e56: 8 total, 8 up, 8 in 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3793439051' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[49764]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[54361]: pgmap v29: 129 pgs: 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1.2 KiB/s wr, 3 op/s 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[54361]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool create", "pool": "foo"}]': finished 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[54361]: osdmap e56: 8 total, 8 up, 8 in 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3793439051' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[54361]: from='client.? ' entity='client.admin' cmd=[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]: dispatch 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:04 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:05.700 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: from='client.? ' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: osdmap e57: 8 total, 8 up, 8 in 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: pgmap v32: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1.2 KiB/s wr, 3 op/s 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: Saving service rgw.foo spec with placement count:2 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: Deploying daemon rgw.foo.vm05.tiuqos on vm05 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[49764]: osdmap e58: 8 total, 8 up, 8 in 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: from='client.? 
' entity='client.admin' cmd='[{"prefix": "osd pool application enable","pool": "foo","app": "rbd"}]': finished 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: osdmap e57: 8 total, 8 up, 8 in 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: pgmap v32: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1.2 KiB/s wr, 3 op/s 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: Saving service rgw.foo spec with placement count:2 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: Deploying daemon rgw.foo.vm05.tiuqos on vm05 2026-03-09T15:29:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:05 vm05 ceph-mon[54361]: osdmap e58: 8 total, 8 up, 8 in 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: osdmap e57: 8 total, 8 up, 8 in 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: pgmap v32: 161 pgs: 32 unknown, 129 active+clean; 453 KiB data, 52 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1.2 KiB/s wr, 3 op/s 2026-03-09T15:29:06.061 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: Saving service rgw.foo spec with placement count:2 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: Deploying daemon rgw.foo.vm05.tiuqos on vm05 2026-03-09T15:29:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:05 vm09 ceph-mon[49358]: osdmap e58: 8 total, 8 up, 8 in 2026-03-09T15:29:06.636 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:29:06 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:29:06] "GET /metrics HTTP/1.1" 200 197469 "" "Prometheus/2.33.4" 2026-03-09T15:29:06.773 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch apply iscsi foo u p' 2026-03-09T15:29:07.446 INFO:teuthology.orchestra.run.vm05.stdout:Scheduled iscsi.foo update... 
2026-03-09T15:29:07.461 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:07.461 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:07.461 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:07.461 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T15:29:07.461 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:07.461 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:07.461 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[49764]: Deploying daemon rgw.foo.vm09.aljafu on vm09 2026-03-09T15:29:07.461 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[49764]: osdmap e59: 8 total, 8 up, 8 in 2026-03-09T15:29:07.465 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:07.465 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:07.465 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:07.465 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T15:29:07.465 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:07.465 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:07.465 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[54361]: Deploying daemon rgw.foo.vm09.aljafu on vm09 2026-03-09T15:29:07.465 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:07 vm05 ceph-mon[54361]: osdmap e59: 8 total, 8 up, 8 in 2026-03-09T15:29:07.534 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c 
/etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 120' 2026-03-09T15:29:07.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:07 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:07.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:07 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:07.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:07 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:07.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:07 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T15:29:07.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:07 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:07.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:07 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:07.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:07 vm09 ceph-mon[49358]: Deploying daemon rgw.foo.vm09.aljafu on vm09 2026-03-09T15:29:07.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:07 vm09 ceph-mon[49358]: osdmap e59: 8 total, 8 up, 8 in 2026-03-09T15:29:08.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[49764]: pgmap v35: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 1022 B/s wr, 1 op/s 2026-03-09T15:29:08.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:08.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[49764]: Saving service rgw.smpl spec with placement count:2 2026-03-09T15:29:08.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:08.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:08.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:08.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T15:29:08.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[49764]: 
from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:08.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:08.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[49764]: Deploying daemon rgw.smpl.vm05.grnlph on vm05 2026-03-09T15:29:08.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[49764]: from='client.24554 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:29:08.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[49764]: Saving service iscsi.foo spec with placement count:1 2026-03-09T15:29:08.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:08.397 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[54361]: pgmap v35: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 1022 B/s wr, 1 op/s 2026-03-09T15:29:08.397 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:08.397 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[54361]: Saving service rgw.smpl spec with placement count:2 2026-03-09T15:29:08.397 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:08.397 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:08.397 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:08.397 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T15:29:08.397 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:08.397 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:08.397 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[54361]: Deploying daemon rgw.smpl.vm05.grnlph on vm05 2026-03-09T15:29:08.397 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[54361]: from='client.24554 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:29:08.397 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:08 vm05 ceph-mon[54361]: Saving service iscsi.foo spec with placement count:1 2026-03-09T15:29:08.397 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:08 vm05 
ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:08 vm09 ceph-mon[49358]: pgmap v35: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 1022 B/s wr, 1 op/s 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:08 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:08 vm09 ceph-mon[49358]: Saving service rgw.smpl spec with placement count:2 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:08 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:08 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:08 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:08 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:08 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:08 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:08 vm09 ceph-mon[49358]: Deploying daemon rgw.smpl.vm05.grnlph on vm05 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:08 vm09 ceph-mon[49358]: from='client.24554 -' entity='client.admin' cmd=[{"prefix": "orch apply iscsi", "pool": "foo", "api_user": "u", "api_password": "p", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:08 vm09 ceph-mon[49358]: Saving service iscsi.foo spec with placement count:1 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:08 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:08.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:29:08 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:29:08] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:29:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:09 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:09 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:09 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth 
get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:09 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T15:29:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:09 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:09 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:09 vm09 ceph-mon[49358]: Deploying daemon rgw.smpl.vm09.mkjxeh on vm09 2026-03-09T15:29:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:09 vm09 ceph-mon[49358]: pgmap v36: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 767 B/s wr, 1 op/s 2026-03-09T15:29:10.216 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[49764]: Deploying daemon rgw.smpl.vm09.mkjxeh on vm09 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[49764]: pgmap v36: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 767 B/s wr, 1 op/s 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 
cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[54361]: Deploying daemon rgw.smpl.vm09.mkjxeh on vm09 2026-03-09T15:29:10.217 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:09 vm05 ceph-mon[54361]: pgmap v36: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 767 B/s wr, 1 op/s 2026-03-09T15:29:11.423 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:11 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:11.423 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:11 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:11.423 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:11 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:11.423 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:11 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:11.423 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:11 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:11.423 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:11 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:11.423 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:11 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:11.423 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:11 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:11.480 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:11 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:11.480 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:11 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:11.480 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:11 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:11.480 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:11 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:12.937 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:12 vm05 ceph-mon[49764]: pgmap v37: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 646 B/s wr, 1 op/s 
2026-03-09T15:29:12.937 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:12 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:12.937 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:12 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:12.937 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:12 vm05 ceph-mon[49764]: Checking dashboard <-> RGW credentials 2026-03-09T15:29:12.937 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:12 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:12.937 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:12 vm05 ceph-mon[54361]: pgmap v37: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 646 B/s wr, 1 op/s 2026-03-09T15:29:12.938 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:12 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:12.938 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:12 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:12.938 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:12 vm05 ceph-mon[54361]: Checking dashboard <-> RGW credentials 2026-03-09T15:29:12.938 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:12 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:13.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:12 vm09 ceph-mon[49358]: pgmap v37: 161 pgs: 161 active+clean; 453 KiB data, 54 MiB used, 160 GiB / 160 GiB avail; 646 B/s wr, 1 op/s 2026-03-09T15:29:13.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:12 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:13.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:12 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:13.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:12 vm09 ceph-mon[49358]: Checking dashboard <-> RGW credentials 2026-03-09T15:29:13.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:12 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:13.734 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[54361]: Checking pool "foo" exists for service iscsi.foo 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": 
["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[54361]: Deploying daemon iscsi.foo.vm05.rfsich on vm05 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[49764]: Checking pool "foo" exists for service iscsi.foo 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:13.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:13 vm05 ceph-mon[49764]: Deploying daemon iscsi.foo.vm05.rfsich on vm05 2026-03-09T15:29:14.028 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:13 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:14.028 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:13 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:14.028 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:13 vm09 ceph-mon[49358]: Checking pool "foo" exists for service iscsi.foo 2026-03-09T15:29:14.028 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:13 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:29:14.028 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:13 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:29:14.028 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:13 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]': finished 2026-03-09T15:29:14.028 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:13 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:14.028 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:13 vm09 ceph-mon[49358]: Deploying daemon iscsi.foo.vm05.rfsich on vm05 2026-03-09T15:29:14.868 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[54361]: pgmap v38: 161 pgs: 161 active+clean; 456 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 203 KiB/s rd, 5.1 KiB/s wr, 362 op/s 2026-03-09T15:29:14.868 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[54361]: mgrmap e20: y(active, since 38s), standbys: x 2026-03-09T15:29:14.868 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:14.868 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:14.868 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:14.868 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1929110007' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T15:29:14.868 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/420503528' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1724629666"}]: dispatch 2026-03-09T15:29:14.868 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[54361]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1724629666"}]: dispatch 2026-03-09T15:29:14.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[49764]: pgmap v38: 161 pgs: 161 active+clean; 456 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 203 KiB/s rd, 5.1 KiB/s wr, 362 op/s 2026-03-09T15:29:14.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[49764]: mgrmap e20: y(active, since 38s), standbys: x 2026-03-09T15:29:14.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:14.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:14.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:14.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1929110007' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T15:29:14.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/420503528' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1724629666"}]: dispatch 2026-03-09T15:29:14.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:14 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1724629666"}]: dispatch 2026-03-09T15:29:15.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:14 vm09 ceph-mon[49358]: pgmap v38: 161 pgs: 161 active+clean; 456 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 203 KiB/s rd, 5.1 KiB/s wr, 362 op/s 2026-03-09T15:29:15.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:14 vm09 ceph-mon[49358]: mgrmap e20: y(active, since 38s), standbys: x 2026-03-09T15:29:15.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:14 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:14 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:15.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:14 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:15.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:14 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1929110007' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T15:29:15.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:14 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/420503528' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1724629666"}]: dispatch 2026-03-09T15:29:15.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:14 vm09 ceph-mon[49358]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1724629666"}]: dispatch 2026-03-09T15:29:15.493 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 systemd[1]: Stopping Ceph alertmanager.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:29:15.493 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 bash[76540]: Error: no container with name or ID "ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager.a" found: no such container 2026-03-09T15:29:15.493 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[72090]: level=info ts=2026-03-09T15:29:15.460Z caller=main.go:557 msg="Received SIGTERM, exiting gracefully..." 2026-03-09T15:29:15.493 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 podman[76548]: 2026-03-09 15:29:15.472950578 +0000 UTC m=+0.026155934 container died feb3f636917d5b5b83d3bc20762d625ac7a43529601fc5935cb0d1029226dba8 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T15:29:15.493 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 podman[76548]: 2026-03-09 15:29:15.492393571 +0000 UTC m=+0.045598927 container remove feb3f636917d5b5b83d3bc20762d625ac7a43529601fc5935cb0d1029226dba8 (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1724629666"}]': finished 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[49764]: osdmap e60: 8 total, 8 up, 8 in 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/720573857' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3424409228"}]: dispatch 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3424409228"}]: dispatch 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[49764]: pgmap v40: 161 pgs: 161 active+clean; 456 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 195 KiB/s rd, 4.4 KiB/s wr, 347 op/s 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[49764]: Reconfiguring alertmanager.a (dependencies changed)... 
2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[49764]: Reconfiguring daemon alertmanager.a on vm05 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1724629666"}]': finished 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[54361]: osdmap e60: 8 total, 8 up, 8 in 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/720573857' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3424409228"}]: dispatch 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3424409228"}]: dispatch 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[54361]: pgmap v40: 161 pgs: 161 active+clean; 456 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 195 KiB/s rd, 4.4 KiB/s wr, 347 op/s 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[54361]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[54361]: Reconfiguring daemon alertmanager.a on vm05 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.804 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:15 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.804 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 podman[76548]: 2026-03-09 15:29:15.493623466 +0000 UTC m=+0.046828822 volume remove 681e2344bf206f4c2f7f95ee4c679b3f1a4d299b52e9293d8026ef57b658a309 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 bash[76548]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 bash[76568]: Error: no container with name or ID "ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager.a" found: no such container 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@alertmanager.a.service: Deactivated successfully. 
2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 systemd[1]: Stopped Ceph alertmanager.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 systemd[1]: Starting Ceph alertmanager.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 podman[76609]: 2026-03-09 15:29:15.614941231 +0000 UTC m=+0.015407753 volume create ba4e936013d823314895a7a3c286425426345366db7de94e659df5ed47a8a7ed 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 podman[76609]: 2026-03-09 15:29:15.61750754 +0000 UTC m=+0.017974062 container create f0e454ec34d8b89f27d1b037be47d532d42b20ffd070a24b54dfb357e9c322fc (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 podman[76609]: 2026-03-09 15:29:15.642502408 +0000 UTC m=+0.042968940 container init f0e454ec34d8b89f27d1b037be47d532d42b20ffd070a24b54dfb357e9c322fc (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 podman[76609]: 2026-03-09 15:29:15.645328804 +0000 UTC m=+0.045795326 container start f0e454ec34d8b89f27d1b037be47d532d42b20ffd070a24b54dfb357e9c322fc (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 bash[76609]: f0e454ec34d8b89f27d1b037be47d532d42b20ffd070a24b54dfb357e9c322fc 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 podman[76609]: 2026-03-09 15:29:15.609233415 +0000 UTC m=+0.009699937 image pull ba2b418f427c0636d654de8757e830c80168e76482bcc46bb2138e569d6c91d4 quay.io/prometheus/alertmanager:v0.23.0 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 systemd[1]: Started Ceph alertmanager.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=info ts=2026-03-09T15:29:15.664Z caller=main.go:225 msg="Starting Alertmanager" version="(version=0.23.0, branch=HEAD, revision=61046b17771a57cfd4c4a51be370ab930a4d7d54)" 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=info ts=2026-03-09T15:29:15.664Z caller=main.go:226 build_context="(go=go1.16.7, user=root@e21a959be8d2, date=20210825-10:48:55)" 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=info ts=2026-03-09T15:29:15.665Z caller=cluster.go:184 component=cluster msg="setting advertise address explicitly" addr=192.168.123.105 port=9094 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=info ts=2026-03-09T15:29:15.666Z caller=cluster.go:671 component=cluster msg="Waiting for gossip to settle..." interval=2s 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=info ts=2026-03-09T15:29:15.698Z caller=coordinator.go:113 component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=info ts=2026-03-09T15:29:15.699Z caller=coordinator.go:126 component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=info ts=2026-03-09T15:29:15.703Z caller=main.go:518 msg=Listening address=:9093 2026-03-09T15:29:15.805 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:15 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=info ts=2026-03-09T15:29:15.703Z caller=tls_config.go:191 msg="TLS is disabled." http2=false 2026-03-09T15:29:15.958 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:15 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1724629666"}]': finished 2026-03-09T15:29:15.958 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:15 vm09 ceph-mon[49358]: osdmap e60: 8 total, 8 up, 8 in 2026-03-09T15:29:15.958 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:15 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.958 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:15 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/720573857' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3424409228"}]: dispatch 2026-03-09T15:29:15.958 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:15 vm09 ceph-mon[49358]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3424409228"}]: dispatch 2026-03-09T15:29:15.958 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:15 vm09 ceph-mon[49358]: pgmap v40: 161 pgs: 161 active+clean; 456 KiB data, 66 MiB used, 160 GiB / 160 GiB avail; 195 KiB/s rd, 4.4 KiB/s wr, 347 op/s 2026-03-09T15:29:15.958 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:15 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.958 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:15 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.958 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:15 vm09 ceph-mon[49358]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-09T15:29:15.958 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:15 vm09 ceph-mon[49358]: Reconfiguring daemon alertmanager.a on vm05 2026-03-09T15:29:15.958 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:15 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:15.958 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:15 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:16.301 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 systemd[1]: Stopping Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:29:16.301 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 bash[67225]: Error: no container with name or ID "ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus.a" found: no such container 2026-03-09T15:29:16.301 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:29:16.125Z caller=main.go:775 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-09T15:29:16.301 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:29:16.125Z caller=main.go:798 level=info msg="Stopping scrape discovery manager..." 2026-03-09T15:29:16.301 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:29:16.125Z caller=main.go:812 level=info msg="Stopping notify discovery manager..." 2026-03-09T15:29:16.301 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:29:16.125Z caller=main.go:834 level=info msg="Stopping scrape manager..." 2026-03-09T15:29:16.301 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:29:16.125Z caller=main.go:794 level=info msg="Scrape discovery manager stopped" 2026-03-09T15:29:16.301 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:29:16.125Z caller=main.go:808 level=info msg="Notify discovery manager stopped" 2026-03-09T15:29:16.301 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:29:16.126Z caller=manager.go:945 level=info component="rule manager" msg="Stopping rule manager..." 
2026-03-09T15:29:16.301 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:29:16.126Z caller=manager.go:955 level=info component="rule manager" msg="Rule manager stopped" 2026-03-09T15:29:16.302 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:29:16.126Z caller=main.go:828 level=info msg="Scrape manager stopped" 2026-03-09T15:29:16.302 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:29:16.126Z caller=notifier.go:600 level=info component=notifier msg="Stopping notification manager..." 2026-03-09T15:29:16.302 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:29:16.126Z caller=main.go:1054 level=info msg="Notifier manager stopped" 2026-03-09T15:29:16.302 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[64065]: ts=2026-03-09T15:29:16.126Z caller=main.go:1066 level=info msg="See you next time!" 2026-03-09T15:29:16.302 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 podman[67233]: 2026-03-09 15:29:16.137177434 +0000 UTC m=+0.028953647 container died 238b579fef25670b2cf11a44acff7b59575447f29edb069a28074834e39c7d1c (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:29:16.302 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 podman[67233]: 2026-03-09 15:29:16.156055816 +0000 UTC m=+0.047832029 container remove 238b579fef25670b2cf11a44acff7b59575447f29edb069a28074834e39c7d1c (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:29:16.302 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 bash[67233]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a 2026-03-09T15:29:16.302 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 bash[67252]: Error: no container with name or ID "ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus.a" found: no such container 2026-03-09T15:29:16.302 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@prometheus.a.service: Deactivated successfully. 2026-03-09T15:29:16.302 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 systemd[1]: Stopped Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:29:16.302 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 systemd[1]: Starting Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 podman[67295]: 2026-03-09 15:29:16.302016962 +0000 UTC m=+0.023451797 container create d95368d63cb2b6c8dbbdd0d041e93933a78be915b2de0ca6d2ff08b1b2d29399 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 podman[67295]: 2026-03-09 15:29:16.339033049 +0000 UTC m=+0.060467894 container init d95368d63cb2b6c8dbbdd0d041e93933a78be915b2de0ca6d2ff08b1b2d29399 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 podman[67295]: 2026-03-09 15:29:16.341867721 +0000 UTC m=+0.063302556 container start d95368d63cb2b6c8dbbdd0d041e93933a78be915b2de0ca6d2ff08b1b2d29399 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 bash[67295]: d95368d63cb2b6c8dbbdd0d041e93933a78be915b2de0ca6d2ff08b1b2d29399 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 podman[67295]: 2026-03-09 15:29:16.289898444 +0000 UTC m=+0.011333290 image pull 514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d quay.io/prometheus/prometheus:v2.33.4 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 systemd[1]: Started Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:16.374Z caller=main.go:475 level=info msg="No time or size retention was set so using the default time retention" duration=15d 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:16.374Z caller=main.go:512 level=info msg="Starting Prometheus" version="(version=2.33.4, branch=HEAD, revision=83032011a5d3e6102624fe58241a374a7201fee8)" 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:16.374Z caller=main.go:517 level=info build_context="(go=go1.17.7, user=root@d13bf69e7be8, date=20220222-16:51:28)" 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:16.374Z caller=main.go:518 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm09 (none))" 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:16.374Z caller=main.go:519 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:16.374Z caller=main.go:520 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-09T15:29:16.561 
INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:16.378Z caller=web.go:570 level=info component=web msg="Start listening for connections" address=:9095 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:16.379Z caller=main.go:923 level=info msg="Starting TSDB ..." 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:16.382Z caller=tls_config.go:195 level=info component=web msg="TLS is disabled." http2=false 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:16.383Z caller=head.go:493 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:16.383Z caller=head.go:527 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.503µs 2026-03-09T15:29:16.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:16.383Z caller=head.go:533 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: Reconfiguring daemon prometheus.a on vm09 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3424409228"}]': finished 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: osdmap e61: 8 total, 8 up, 8 in 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3031014075' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2003379400"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2003379400"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.105:9093"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.105:9093"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: Adding iSCSI gateway http://:@192.168.123.105:5000 to Dashboard 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.109:9095"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.109:9095"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.109:3000"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.109:3000"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2003379400"}]': finished 2026-03-09T15:29:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:16 vm09 ceph-mon[49358]: osdmap e62: 8 total, 8 up, 8 in 2026-03-09T15:29:17.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: Reconfiguring daemon prometheus.a on vm09 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3424409228"}]': finished 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: osdmap e61: 8 total, 8 up, 8 in 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3031014075' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2003379400"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2003379400"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.105:9093"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.105:9093"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: Adding iSCSI gateway http://:@192.168.123.105:5000 to Dashboard 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.109:9095"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.109:9095"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.109:3000"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.109:3000"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2003379400"}]': finished 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[49764]: osdmap e62: 8 total, 8 up, 8 in 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: Reconfiguring daemon prometheus.a on vm09 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3424409228"}]': finished 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: osdmap e61: 8 total, 8 up, 8 in 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/3031014075' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2003379400"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2003379400"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.105:9093"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://192.168.123.105:9093"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: Adding iSCSI gateway http://:@192.168.123.105:5000 to Dashboard 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:29:17.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.109:9095"}]: dispatch 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://192.168.123.109:9095"}]: dispatch 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.109:3000"}]: dispatch 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://192.168.123.109:3000"}]: dispatch 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2003379400"}]': finished 2026-03-09T15:29:17.186 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:16 vm05 ceph-mon[54361]: osdmap e62: 8 total, 8 up, 8 in 2026-03-09T15:29:17.735 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:17 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=info ts=2026-03-09T15:29:17.667Z caller=cluster.go:696 component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.000929641s 2026-03-09T15:29:18.061 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:17.811Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=1 2026-03-09T15:29:18.061 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:17.811Z caller=head.go:604 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=1 2026-03-09T15:29:18.061 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:17.811Z caller=head.go:610 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=228.988µs wal_replay_duration=1.427927758s total_replay_duration=1.428168428s 2026-03-09T15:29:18.061 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:17.813Z caller=main.go:944 level=info fs_type=XFS_SUPER_MAGIC 2026-03-09T15:29:18.061 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:17.813Z caller=main.go:947 level=info msg="TSDB started" 2026-03-09T15:29:18.061 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:17.813Z caller=main.go:1128 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-09T15:29:18.061 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:17.826Z caller=main.go:1165 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=13.635656ms db_storage=531ns remote_storage=1.042µs web_handler=441ns query_engine=611ns scrape=640.819µs scrape_sd=23.413µs notify=19.556µs notify_sd=4.719µs rules=12.753836ms 2026-03-09T15:29:18.061 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:29:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:29:17.826Z caller=main.go:896 level=info msg="Server is ready to receive web requests." 2026-03-09T15:29:18.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:17 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3066680146' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3945943148"}]: dispatch 2026-03-09T15:29:18.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:17 vm09 ceph-mon[49358]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3945943148"}]: dispatch 2026-03-09T15:29:18.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:17 vm09 ceph-mon[49358]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail; 409 KiB/s rd, 6.7 KiB/s wr, 697 op/s 2026-03-09T15:29:18.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:17 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:18.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:17 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:18.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:17 vm09 ceph-mon[49358]: Checking dashboard <-> RGW credentials 2026-03-09T15:29:18.102 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:17 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3066680146' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3945943148"}]: dispatch 2026-03-09T15:29:18.103 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:17 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3945943148"}]: dispatch 2026-03-09T15:29:18.103 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:17 vm05 ceph-mon[49764]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail; 409 KiB/s rd, 6.7 KiB/s wr, 697 op/s 2026-03-09T15:29:18.103 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:17 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:18.103 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:17 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:18.103 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:17 vm05 ceph-mon[49764]: Checking dashboard <-> RGW credentials 2026-03-09T15:29:18.103 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:17 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3066680146' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3945943148"}]: dispatch 2026-03-09T15:29:18.103 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:17 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3945943148"}]: dispatch 2026-03-09T15:29:18.103 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:17 vm05 ceph-mon[54361]: pgmap v43: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail; 409 KiB/s rd, 6.7 KiB/s wr, 697 op/s 2026-03-09T15:29:18.103 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:17 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:18.103 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:17 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:18.103 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:17 vm05 ceph-mon[54361]: Checking dashboard <-> RGW credentials 2026-03-09T15:29:19.044 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:18 vm09 ceph-mon[49358]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3945943148"}]': finished 2026-03-09T15:29:19.044 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:18 vm09 ceph-mon[49358]: osdmap e63: 8 total, 8 up, 8 in 2026-03-09T15:29:19.044 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:18 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3655783261' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/954087458"}]: dispatch 2026-03-09T15:29:19.044 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:18 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:19.044 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:18 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:19.044 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:18 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:19.170 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:18 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3945943148"}]': finished 2026-03-09T15:29:19.170 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:18 vm05 ceph-mon[49764]: osdmap e63: 8 total, 8 up, 8 in 2026-03-09T15:29:19.170 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:18 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3655783261' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/954087458"}]: dispatch 2026-03-09T15:29:19.170 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:18 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:19.170 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:18 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:19.170 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:18 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:19.170 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:18 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3945943148"}]': finished 2026-03-09T15:29:19.170 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:18 vm05 ceph-mon[54361]: osdmap e63: 8 total, 8 up, 8 in 2026-03-09T15:29:19.170 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:18 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/3655783261' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/954087458"}]: dispatch 2026-03-09T15:29:19.170 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:18 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:19.170 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:18 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:29:19.170 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:18 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3655783261' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/954087458"}]': finished 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[49764]: osdmap e64: 8 total, 8 up, 8 in 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[49764]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail; 207 KiB/s rd, 767 B/s wr, 322 op/s 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1893571424' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/610584152"}]: dispatch 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/610584152"}]: dispatch 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3655783261' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/954087458"}]': finished 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[54361]: osdmap e64: 8 total, 8 up, 8 in 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[54361]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail; 207 KiB/s rd, 767 B/s wr, 322 op/s 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1893571424' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/610584152"}]: dispatch 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[54361]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/610584152"}]: dispatch 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:20.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:19 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:20.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:19 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3655783261' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/954087458"}]': finished 2026-03-09T15:29:20.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:19 vm09 ceph-mon[49358]: osdmap e64: 8 total, 8 up, 8 in 2026-03-09T15:29:20.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:19 vm09 ceph-mon[49358]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail; 207 KiB/s rd, 767 B/s wr, 322 op/s 2026-03-09T15:29:20.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:19 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1893571424' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/610584152"}]: dispatch 2026-03-09T15:29:20.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:19 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/610584152"}]: dispatch 2026-03-09T15:29:20.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:19 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:20.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:19 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:20.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:19 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:29:21.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:20 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/610584152"}]': finished 2026-03-09T15:29:21.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:20 vm05 ceph-mon[49764]: osdmap e65: 8 total, 8 up, 8 in 2026-03-09T15:29:21.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:20 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3193864549' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1659984683"}]: dispatch 2026-03-09T15:29:21.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:20 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/610584152"}]': finished 2026-03-09T15:29:21.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:20 vm05 ceph-mon[54361]: osdmap e65: 8 total, 8 up, 8 in 2026-03-09T15:29:21.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:20 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/3193864549' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1659984683"}]: dispatch 2026-03-09T15:29:21.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:20 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/610584152"}]': finished 2026-03-09T15:29:21.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:20 vm09 ceph-mon[49358]: osdmap e65: 8 total, 8 up, 8 in 2026-03-09T15:29:21.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:20 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3193864549' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1659984683"}]: dispatch 2026-03-09T15:29:22.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:21 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3193864549' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1659984683"}]': finished 2026-03-09T15:29:22.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:21 vm05 ceph-mon[49764]: osdmap e66: 8 total, 8 up, 8 in 2026-03-09T15:29:22.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:21 vm05 ceph-mon[49764]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:29:22.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:21 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2767968890' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3264305837"}]: dispatch 2026-03-09T15:29:22.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:21 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3264305837"}]: dispatch 2026-03-09T15:29:22.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:21 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3193864549' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1659984683"}]': finished 2026-03-09T15:29:22.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:21 vm05 ceph-mon[54361]: osdmap e66: 8 total, 8 up, 8 in 2026-03-09T15:29:22.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:21 vm05 ceph-mon[54361]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:29:22.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:21 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2767968890' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3264305837"}]: dispatch 2026-03-09T15:29:22.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:21 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3264305837"}]: dispatch 2026-03-09T15:29:22.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:21 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/3193864549' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1659984683"}]': finished 2026-03-09T15:29:22.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:21 vm09 ceph-mon[49358]: osdmap e66: 8 total, 8 up, 8 in 2026-03-09T15:29:22.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:21 vm09 ceph-mon[49358]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 89 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:29:22.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:21 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2767968890' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3264305837"}]: dispatch 2026-03-09T15:29:22.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:21 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3264305837"}]: dispatch 2026-03-09T15:29:23.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:22 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3264305837"}]': finished 2026-03-09T15:29:23.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:22 vm05 ceph-mon[49764]: osdmap e67: 8 total, 8 up, 8 in 2026-03-09T15:29:23.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:22 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1321856518' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/649368777"}]: dispatch 2026-03-09T15:29:23.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:22 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3264305837"}]': finished 2026-03-09T15:29:23.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:22 vm05 ceph-mon[54361]: osdmap e67: 8 total, 8 up, 8 in 2026-03-09T15:29:23.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:22 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1321856518' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/649368777"}]: dispatch 2026-03-09T15:29:23.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:22 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3264305837"}]': finished 2026-03-09T15:29:23.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:22 vm09 ceph-mon[49358]: osdmap e67: 8 total, 8 up, 8 in 2026-03-09T15:29:23.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:22 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1321856518' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/649368777"}]: dispatch 2026-03-09T15:29:24.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:23 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/1321856518' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/649368777"}]': finished 2026-03-09T15:29:24.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:23 vm05 ceph-mon[49764]: osdmap e68: 8 total, 8 up, 8 in 2026-03-09T15:29:24.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:23 vm05 ceph-mon[49764]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-09T15:29:24.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:23 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2452692020' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1527221936"}]: dispatch 2026-03-09T15:29:24.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:23 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1321856518' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/649368777"}]': finished 2026-03-09T15:29:24.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:23 vm05 ceph-mon[54361]: osdmap e68: 8 total, 8 up, 8 in 2026-03-09T15:29:24.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:23 vm05 ceph-mon[54361]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-09T15:29:24.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:23 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2452692020' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1527221936"}]: dispatch 2026-03-09T15:29:24.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:23 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1321856518' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/649368777"}]': finished 2026-03-09T15:29:24.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:23 vm09 ceph-mon[49358]: osdmap e68: 8 total, 8 up, 8 in 2026-03-09T15:29:24.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:23 vm09 ceph-mon[49358]: pgmap v52: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-09T15:29:24.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:23 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2452692020' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1527221936"}]: dispatch 2026-03-09T15:29:25.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:24 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2452692020' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1527221936"}]': finished 2026-03-09T15:29:25.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:24 vm05 ceph-mon[49764]: osdmap e69: 8 total, 8 up, 8 in 2026-03-09T15:29:25.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:24 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/2273873311' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/3424409228"}]: dispatch 2026-03-09T15:29:25.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:24 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:29:25.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:24 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2452692020' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1527221936"}]': finished 2026-03-09T15:29:25.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:24 vm05 ceph-mon[54361]: osdmap e69: 8 total, 8 up, 8 in 2026-03-09T15:29:25.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:24 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2273873311' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/3424409228"}]: dispatch 2026-03-09T15:29:25.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:24 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:29:25.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:24 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2452692020' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1527221936"}]': finished 2026-03-09T15:29:25.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:24 vm09 ceph-mon[49358]: osdmap e69: 8 total, 8 up, 8 in 2026-03-09T15:29:25.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:24 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2273873311' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/3424409228"}]: dispatch 2026-03-09T15:29:25.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:24 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:29:25.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:25 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2273873311' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/3424409228"}]': finished 2026-03-09T15:29:25.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:25 vm05 ceph-mon[49764]: osdmap e70: 8 total, 8 up, 8 in 2026-03-09T15:29:25.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:25 vm05 ceph-mon[49764]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-09T15:29:25.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:25 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/370640876' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2276953556"}]: dispatch 2026-03-09T15:29:25.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:25 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=info ts=2026-03-09T15:29:25.669Z caller=cluster.go:688 component=cluster msg="gossip settled; proceeding" elapsed=10.003687602s 2026-03-09T15:29:25.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:25 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2273873311' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/3424409228"}]': finished 2026-03-09T15:29:25.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:25 vm05 ceph-mon[54361]: osdmap e70: 8 total, 8 up, 8 in 2026-03-09T15:29:25.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:25 vm05 ceph-mon[54361]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-09T15:29:25.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:25 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/370640876' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2276953556"}]: dispatch 2026-03-09T15:29:26.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:25 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2273873311' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/3424409228"}]': finished 2026-03-09T15:29:26.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:25 vm09 ceph-mon[49358]: osdmap e70: 8 total, 8 up, 8 in 2026-03-09T15:29:26.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:25 vm09 ceph-mon[49358]: pgmap v55: 161 pgs: 161 active+clean; 457 KiB data, 94 MiB used, 160 GiB / 160 GiB avail; 9.2 KiB/s rd, 9 op/s 2026-03-09T15:29:26.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:25 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/370640876' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2276953556"}]: dispatch 2026-03-09T15:29:26.734 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:29:26 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:29:26] "GET /metrics HTTP/1.1" 200 214462 "" "Prometheus/2.33.4" 2026-03-09T15:29:27.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:26 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/370640876' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2276953556"}]': finished 2026-03-09T15:29:27.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:26 vm05 ceph-mon[54361]: osdmap e71: 8 total, 8 up, 8 in 2026-03-09T15:29:27.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:26 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2131807745' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/814043816"}]: dispatch 2026-03-09T15:29:27.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:26 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/370640876' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2276953556"}]': finished 2026-03-09T15:29:27.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:26 vm05 ceph-mon[49764]: osdmap e71: 8 total, 8 up, 8 in 2026-03-09T15:29:27.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:26 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2131807745' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/814043816"}]: dispatch 2026-03-09T15:29:27.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:26 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/370640876' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2276953556"}]': finished 2026-03-09T15:29:27.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:26 vm09 ceph-mon[49358]: osdmap e71: 8 total, 8 up, 8 in 2026-03-09T15:29:27.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:26 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2131807745' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/814043816"}]: dispatch 2026-03-09T15:29:28.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:27 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2131807745' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/814043816"}]': finished 2026-03-09T15:29:28.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:27 vm05 ceph-mon[54361]: osdmap e72: 8 total, 8 up, 8 in 2026-03-09T15:29:28.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:27 vm05 ceph-mon[54361]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:28.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:27 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1053010092' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/610584152"}]: dispatch 2026-03-09T15:29:28.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:27 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/610584152"}]: dispatch 2026-03-09T15:29:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:27 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2131807745' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/814043816"}]': finished 2026-03-09T15:29:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:27 vm05 ceph-mon[49764]: osdmap e72: 8 total, 8 up, 8 in 2026-03-09T15:29:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:27 vm05 ceph-mon[49764]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:27 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1053010092' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/610584152"}]: dispatch 2026-03-09T15:29:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:27 vm05 ceph-mon[49764]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/610584152"}]: dispatch 2026-03-09T15:29:28.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:27 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2131807745' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/814043816"}]': finished 2026-03-09T15:29:28.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:27 vm09 ceph-mon[49358]: osdmap e72: 8 total, 8 up, 8 in 2026-03-09T15:29:28.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:27 vm09 ceph-mon[49358]: pgmap v58: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:28.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:27 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1053010092' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/610584152"}]: dispatch 2026-03-09T15:29:28.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:27 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/610584152"}]: dispatch 2026-03-09T15:29:28.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:29:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:29:28] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:29:29.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:28 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/610584152"}]': finished 2026-03-09T15:29:29.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:28 vm05 ceph-mon[54361]: osdmap e73: 8 total, 8 up, 8 in 2026-03-09T15:29:29.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:28 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1248347255' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/1499216893"}]: dispatch 2026-03-09T15:29:29.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:28 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/1499216893"}]: dispatch 2026-03-09T15:29:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:28 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/610584152"}]': finished 2026-03-09T15:29:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:28 vm05 ceph-mon[49764]: osdmap e73: 8 total, 8 up, 8 in 2026-03-09T15:29:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:28 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1248347255' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/1499216893"}]: dispatch 2026-03-09T15:29:29.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:28 vm05 ceph-mon[49764]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/1499216893"}]: dispatch 2026-03-09T15:29:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:28 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/610584152"}]': finished 2026-03-09T15:29:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:28 vm09 ceph-mon[49358]: osdmap e73: 8 total, 8 up, 8 in 2026-03-09T15:29:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:28 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1248347255' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/1499216893"}]: dispatch 2026-03-09T15:29:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:28 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/1499216893"}]: dispatch 2026-03-09T15:29:30.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:29 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/1499216893"}]': finished 2026-03-09T15:29:30.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:29 vm05 ceph-mon[49764]: osdmap e74: 8 total, 8 up, 8 in 2026-03-09T15:29:30.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:29 vm05 ceph-mon[49764]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:30.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:29 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1761588712' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/1499216893"}]: dispatch 2026-03-09T15:29:30.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:29 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/1499216893"}]: dispatch 2026-03-09T15:29:30.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:29 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/1499216893"}]': finished 2026-03-09T15:29:30.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:29 vm05 ceph-mon[54361]: osdmap e74: 8 total, 8 up, 8 in 2026-03-09T15:29:30.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:29 vm05 ceph-mon[54361]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:30.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:29 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1761588712' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/1499216893"}]: dispatch 2026-03-09T15:29:30.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:29 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/1499216893"}]: dispatch 2026-03-09T15:29:30.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:29 vm09 ceph-mon[49358]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/1499216893"}]': finished 2026-03-09T15:29:30.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:29 vm09 ceph-mon[49358]: osdmap e74: 8 total, 8 up, 8 in 2026-03-09T15:29:30.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:29 vm09 ceph-mon[49358]: pgmap v61: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:30.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:29 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1761588712' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/1499216893"}]: dispatch 2026-03-09T15:29:30.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:29 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/1499216893"}]: dispatch 2026-03-09T15:29:31.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:30 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/1499216893"}]': finished 2026-03-09T15:29:31.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:30 vm05 ceph-mon[49764]: osdmap e75: 8 total, 8 up, 8 in 2026-03-09T15:29:31.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:30 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/1499216893"}]': finished 2026-03-09T15:29:31.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:30 vm05 ceph-mon[54361]: osdmap e75: 8 total, 8 up, 8 in 2026-03-09T15:29:31.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:30 vm09 ceph-mon[49358]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/1499216893"}]': finished 2026-03-09T15:29:31.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:30 vm09 ceph-mon[49358]: osdmap e75: 8 total, 8 up, 8 in 2026-03-09T15:29:32.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:32 vm05 ceph-mon[49764]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:29:32.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:32 vm05 ceph-mon[54361]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:29:33.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:32 vm09 ceph-mon[49358]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 95 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:29:33.777 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:29:33.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:29:33.777 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:29:33.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:29:33.777 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:29:33.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:29:33.777 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:29:33.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:29:34.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:33 vm09 ceph-mon[49358]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:29:34.070 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:33 vm05 ceph-mon[49764]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:29:34.070 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:33 vm05 ceph-mon[54361]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:29:35.484 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:35 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:29:35.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:35 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:29:35.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:35 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[49764]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 721 B/s rd, 0 op/s 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1d", "id": [7, 2]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1d", "id": [7, 2]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.12", "id": [1, 5]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.12", "id": [1, 5]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config 
rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[54361]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 721 B/s rd, 0 op/s 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1d", "id": [7, 2]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1d", "id": [7, 2]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.12", "id": [1, 5]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.12", "id": [1, 5]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:29:36.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:29:36.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:36 
vm09 ceph-mon[49358]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 721 B/s rd, 0 op/s 2026-03-09T15:29:36.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:36 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1d", "id": [7, 2]}]: dispatch 2026-03-09T15:29:36.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:36 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]: dispatch 2026-03-09T15:29:36.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:36 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1d", "id": [7, 2]}]: dispatch 2026-03-09T15:29:36.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:36 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]: dispatch 2026-03-09T15:29:36.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:36 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.12", "id": [1, 5]}]: dispatch 2026-03-09T15:29:36.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:36 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.12", "id": [1, 5]}]: dispatch 2026-03-09T15:29:36.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:36 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch 2026-03-09T15:29:36.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:36 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]: dispatch 2026-03-09T15:29:36.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:36 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:29:36.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:36 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:29:36.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:36 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:29:36.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:36 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:29:36.734 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:29:36 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:29:36] "GET /metrics HTTP/1.1" 200 214453 "" "Prometheus/2.33.4" 2026-03-09T15:29:37.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:37 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1d", "id": [7, 
2]}]': finished 2026-03-09T15:29:37.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:37 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]': finished 2026-03-09T15:29:37.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:37 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.12", "id": [1, 5]}]': finished 2026-03-09T15:29:37.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:37 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]': finished 2026-03-09T15:29:37.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:37 vm05 ceph-mon[49764]: osdmap e76: 8 total, 8 up, 8 in 2026-03-09T15:29:37.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:37 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1d", "id": [7, 2]}]': finished 2026-03-09T15:29:37.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:37 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]': finished 2026-03-09T15:29:37.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:37 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.12", "id": [1, 5]}]': finished 2026-03-09T15:29:37.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:37 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]': finished 2026-03-09T15:29:37.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:37 vm05 ceph-mon[54361]: osdmap e76: 8 total, 8 up, 8 in 2026-03-09T15:29:37.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:37 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "2.1d", "id": [7, 2]}]': finished 2026-03-09T15:29:37.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:37 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.a", "id": [1, 2]}]': finished 2026-03-09T15:29:37.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:37 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.12", "id": [1, 5]}]': finished 2026-03-09T15:29:37.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:37 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "4.1c", "id": [1, 5]}]': finished 2026-03-09T15:29:37.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:37 vm09 ceph-mon[49358]: osdmap e76: 8 total, 8 up, 8 in 2026-03-09T15:29:38.484 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:38 vm05 ceph-mon[49764]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:38.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:38 vm05 ceph-mon[49764]: osdmap e77: 8 total, 8 up, 8 in 2026-03-09T15:29:38.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:38 vm05 ceph-mon[54361]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
2026-03-09T15:29:38.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:38 vm05 ceph-mon[54361]: osdmap e77: 8 total, 8 up, 8 in 2026-03-09T15:29:38.555 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:38 vm09 ceph-mon[49358]: pgmap v67: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:38.555 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:38 vm09 ceph-mon[49358]: osdmap e77: 8 total, 8 up, 8 in 2026-03-09T15:29:38.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:29:38 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:29:38] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:29:40.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:40 vm05 ceph-mon[49764]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:40.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:40 vm05 ceph-mon[54361]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:40.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:40 vm09 ceph-mon[49358]: pgmap v69: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:42.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:42 vm05 ceph-mon[49764]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T15:29:42.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:42 vm05 ceph-mon[54361]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T15:29:43.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:42 vm09 ceph-mon[49358]: pgmap v70: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T15:29:43.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:29:43.505Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:29:43.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:29:43.505Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:29:43.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:43 
vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:29:43.507Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:29:43.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:29:43.507Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:29:43.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:29:43.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:29:43.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:29:43.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:29:44.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:44 vm05 ceph-mon[49764]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:44.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:44 vm05 ceph-mon[54361]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:45.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:44 vm09 ceph-mon[49358]: pgmap v71: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:45 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:29:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:45 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:29:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:45 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:29:46.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:46 vm05 ceph-mon[49764]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 576 B/s rd, 0 op/s 2026-03-09T15:29:46.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:29:46 vm05 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:29:46] "GET /metrics HTTP/1.1" 200 214453 "" "Prometheus/2.33.4" 2026-03-09T15:29:46.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:46 vm05 ceph-mon[54361]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 576 B/s rd, 0 op/s 2026-03-09T15:29:47.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:46 vm09 ceph-mon[49358]: pgmap v72: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 576 B/s rd, 0 op/s 2026-03-09T15:29:48.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:48 vm09 ceph-mon[49358]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:29:48.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:29:48 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:29:48] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:29:48.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:48 vm05 ceph-mon[49764]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:29:48.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:48 vm05 ceph-mon[54361]: pgmap v73: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:29:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:50 vm09 ceph-mon[49358]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 864 B/s rd, 0 op/s 2026-03-09T15:29:51.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:50 vm05 ceph-mon[49764]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 864 B/s rd, 0 op/s 2026-03-09T15:29:51.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:50 vm05 ceph-mon[54361]: pgmap v74: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 864 B/s rd, 0 op/s 2026-03-09T15:29:52.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:52 vm05 ceph-mon[49764]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:29:52.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:52 vm05 ceph-mon[54361]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:29:53.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:52 vm09 ceph-mon[49358]: pgmap v75: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:29:53.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:29:53.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:29:53.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:53 vm05 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:29:53.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:29:53.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:29:53.507Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:29:53.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:29:53.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:29:53.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:29:53.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:29:53.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:29:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:29:53.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:29:54.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:54 vm05 ceph-mon[49764]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:54.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:54 vm05 ceph-mon[54361]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:55.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:54 vm09 ceph-mon[49358]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:55.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:55 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:29:55.985 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:55 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:29:56.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:55 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:29:56.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:56 vm05 ceph-mon[49764]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:29:56.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:29:56 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:29:56] "GET /metrics HTTP/1.1" 200 214420 "" "Prometheus/2.33.4" 2026-03-09T15:29:56.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:56 vm05 ceph-mon[54361]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:29:57.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:56 vm09 ceph-mon[49358]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:29:58.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:29:58 vm09 ceph-mon[49358]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:58.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:29:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:29:58] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:29:58.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:29:58 vm05 ceph-mon[49764]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:29:58.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:29:58 vm05 ceph-mon[54361]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:00.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:00 vm05 ceph-mon[49764]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:00 vm05 ceph-mon[49764]: overall HEALTH_OK 2026-03-09T15:30:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:00 vm05 ceph-mon[54361]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:00 vm05 ceph-mon[54361]: overall HEALTH_OK 2026-03-09T15:30:01.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:00 vm09 ceph-mon[49358]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:01.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:00 vm09 ceph-mon[49358]: overall HEALTH_OK 2026-03-09T15:30:02.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:02 vm05 ceph-mon[49764]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:02.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:02 vm05 ceph-mon[54361]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 
160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:03.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:02 vm09 ceph-mon[49358]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:30:03.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:30:03.506Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:03.508Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:03.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:03.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:03.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, 
will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:04.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:04 vm05 ceph-mon[49764]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:04 vm05 ceph-mon[54361]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:05.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:04 vm09 ceph-mon[49358]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:05.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:05 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:05.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:05 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:05 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:06.673 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:30:06 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:30:06] "GET /metrics HTTP/1.1" 200 214424 "" "Prometheus/2.33.4" 2026-03-09T15:30:06.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:06 vm05 ceph-mon[49764]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:06.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:06 vm05 ceph-mon[54361]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:07.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:06 vm09 ceph-mon[49358]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:08.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:30:08 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:30:08] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:30:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:08 vm09 ceph-mon[49358]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:08.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:08 vm05 ceph-mon[49764]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:08 vm05 ceph-mon[54361]: pgmap v83: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:09.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:09 vm05 ceph-mon[49764]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:09.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:09 vm05 
ceph-mon[54361]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:09 vm09 ceph-mon[49358]: pgmap v84: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:12.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:12 vm05 ceph-mon[49764]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:12.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:12 vm05 ceph-mon[54361]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:13.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:12 vm09 ceph-mon[49358]: pgmap v85: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:13.984 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:30:13.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:13.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:30:13.507Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:13.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:13.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:13.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:13.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:13.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:13 vm05 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:13.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:13.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:13.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:14.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:14 vm05 ceph-mon[49764]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:14.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:14 vm05 ceph-mon[54361]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:15.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:14 vm09 ceph-mon[49358]: pgmap v86: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:15.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:15 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:15.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:15 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:16.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:15 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:16.734 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:16 vm05 ceph-mon[49764]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:16.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:30:16 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:30:16] "GET /metrics HTTP/1.1" 200 214424 "" "Prometheus/2.33.4" 2026-03-09T15:30:16.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:16 vm05 ceph-mon[54361]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:17.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:16 vm09 ceph-mon[49358]: pgmap v87: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:18.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:30:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:30:18] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:30:18.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:18 vm09 ceph-mon[49358]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 
op/s 2026-03-09T15:30:18.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:18 vm05 ceph-mon[49764]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:18.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:18 vm05 ceph-mon[54361]: pgmap v88: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:19.948 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:19 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:30:19.948 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:19 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:30:19.963 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:19 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:30:19.963 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:19 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:30:19.964 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:19 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:30:19.964 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:19 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:30:20.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:20 vm05 ceph-mon[49764]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:20.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:20 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:30:20.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:20 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:30:20.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:20 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:30:20.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:20 vm05 ceph-mon[54361]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:20.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:20 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:30:20.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:20 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:30:20.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:20 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:30:21.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:20 vm09 ceph-mon[49358]: pgmap v89: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:21.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:20 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:30:21.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:20 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:30:21.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:20 
vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:30:22.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:22 vm05 ceph-mon[49764]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:22.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:22 vm05 ceph-mon[54361]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:23.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:22 vm09 ceph-mon[49358]: pgmap v90: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:30:23.508Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:30:23.508Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:23.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:23.510Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:23.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:23.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:24.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:24 vm05 ceph-mon[49764]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:24.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:24 vm05 ceph-mon[54361]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:25.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:24 vm09 ceph-mon[49358]: pgmap v91: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:25.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:25 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:25.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:25 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:26.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:25 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:26.677 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:30:26 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:30:26] "GET /metrics HTTP/1.1" 200 214407 "" "Prometheus/2.33.4" 2026-03-09T15:30:26.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:26 vm05 ceph-mon[49764]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:26.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:26 vm05 ceph-mon[54361]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:27.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:26 vm09 ceph-mon[49358]: pgmap v92: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:28.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:28 vm09 ceph-mon[49358]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:28.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:30:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:30:28] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:30:28.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:28 vm05 ceph-mon[49764]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:28.984 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:28 vm05 ceph-mon[54361]: pgmap v93: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:30.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:30 vm05 ceph-mon[49764]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:30.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:30 vm05 ceph-mon[54361]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:31.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:30 vm09 ceph-mon[49358]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:32.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:32 vm05 ceph-mon[49764]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:32.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:32 vm05 ceph-mon[54361]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:33.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:32 vm09 ceph-mon[49358]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:33.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:30:33.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:33.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:30:33.509Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:33.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:33.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:33.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:33 vm05 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:33.511Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:33.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:33.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:33.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:33.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:34.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:34 vm05 ceph-mon[49764]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:34.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:34 vm05 ceph-mon[54361]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:35.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:34 vm09 ceph-mon[49358]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:35.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:35 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:35.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:30:35.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:30:35.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:30:35.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:30:35.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:35 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:35.985 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:30:35.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:30:35.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:30:35.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:30:36.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:35 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:36.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:30:36.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:30:36.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:30:36.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:30:36.684 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:30:36 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:30:36] "GET /metrics HTTP/1.1" 200 214421 "" "Prometheus/2.33.4" 2026-03-09T15:30:36.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:36 vm05 ceph-mon[49764]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:36.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:36 vm05 ceph-mon[54361]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:37.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:36 vm09 ceph-mon[49358]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:38.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:38 vm09 ceph-mon[49358]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:38.812 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:30:38 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:30:38] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:30:38.985 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:38 vm05 ceph-mon[49764]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:38.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:38 vm05 ceph-mon[54361]: pgmap v98: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:39.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:39 vm05 ceph-mon[49764]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:39.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:39 vm05 ceph-mon[54361]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:40.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:39 vm09 ceph-mon[49358]: pgmap v99: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:42.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:42 vm05 ceph-mon[49764]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:42.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:42 vm05 ceph-mon[54361]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:43.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:42 vm09 ceph-mon[49358]: pgmap v100: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:43.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:30:43.510Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:43.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:30:43.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:43.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:43.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": 
x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:43.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:43.512Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:43.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:43.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:43.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:43.513Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:44.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:44 vm05 ceph-mon[49764]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:44.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:44 vm05 ceph-mon[54361]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:45.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:44 vm09 ceph-mon[49358]: pgmap v101: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:45 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:45 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:45 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:46.653 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:30:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:30:46] "GET /metrics HTTP/1.1" 200 214421 "" "Prometheus/2.33.4" 2026-03-09T15:30:46.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:46 vm05 ceph-mon[49764]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:46.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:46 vm05 ceph-mon[54361]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 
GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:47.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:46 vm09 ceph-mon[49358]: pgmap v102: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:48.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:48 vm09 ceph-mon[49358]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:48.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:30:48 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:30:48] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:30:48.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:48 vm05 ceph-mon[49764]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:48.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:48 vm05 ceph-mon[54361]: pgmap v103: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:50.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:50 vm05 ceph-mon[49764]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:50.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:50 vm05 ceph-mon[54361]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:50 vm09 ceph-mon[49358]: pgmap v104: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:52.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:52 vm05 ceph-mon[49764]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:52.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:52 vm05 ceph-mon[54361]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:53.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:52 vm09 ceph-mon[49358]: pgmap v105: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:53.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:30:53.511Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:53.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:30:53.512Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post 
\"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:53.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:53.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:53.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:53.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:53.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:53.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:30:53.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:30:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:30:53.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:30:54.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:54 vm05 ceph-mon[49764]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:54.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:54 vm05 ceph-mon[54361]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:55.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:54 vm09 ceph-mon[49358]: pgmap v106: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:55.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:55 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:55.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:55 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:56.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 
09 15:30:55 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:30:56.705 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:30:56 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:30:56] "GET /metrics HTTP/1.1" 200 214442 "" "Prometheus/2.33.4" 2026-03-09T15:30:56.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:56 vm05 ceph-mon[49764]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:56.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:56 vm05 ceph-mon[54361]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:57.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:56 vm09 ceph-mon[49358]: pgmap v107: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:30:57.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:30:57 vm05 ceph-mon[49764]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:57.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:30:57 vm05 ceph-mon[54361]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:58.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:30:57 vm09 ceph-mon[49358]: pgmap v108: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:30:58.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:30:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:30:58] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:31:00.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:00 vm05 ceph-mon[49764]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:00.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:00 vm05 ceph-mon[54361]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:00.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:00 vm09 ceph-mon[49358]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:02.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:02 vm05 ceph-mon[49764]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:02.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:02 vm05 ceph-mon[54361]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:03.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:02 vm09 ceph-mon[49358]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:31:03.512Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post 
\"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:31:03.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:03.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:03.514Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:03.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:03.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:04.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:04 vm05 ceph-mon[49764]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:04 vm05 ceph-mon[54361]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s 
rd, 1 op/s
2026-03-09T15:31:05.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:04 vm09 ceph-mon[49358]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T15:31:05.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:05 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T15:31:05.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:05 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T15:31:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:05 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch
2026-03-09T15:31:06.657 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:31:06 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:31:06] "GET /metrics HTTP/1.1" 200 214437 "" "Prometheus/2.33.4"
2026-03-09T15:31:06.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:06 vm05 ceph-mon[49764]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T15:31:06.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:06 vm05 ceph-mon[54361]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T15:31:07.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:06 vm09 ceph-mon[49358]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T15:31:08.197 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force'
2026-03-09T15:31:08.747 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force'
2026-03-09T15:31:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:08 vm09 ceph-mon[49358]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T15:31:08.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:31:08 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:31:08] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4"
2026-03-09T15:31:08.930 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:08 vm05 ceph-mon[54361]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T15:31:08.931 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:08 vm05 ceph-mon[49764]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s
2026-03-09T15:31:09.320 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph config set global log_to_journald false --force'
2026-03-09T15:31:09.878 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1'
2026-03-09T15:31:10.408 INFO:teuthology.orchestra.run.vm05.stdout:Initiating upgrade to quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T15:31:10.490 INFO:teuthology.run_tasks:Running task cephadm.shell...
2026-03-09T15:31:10.492 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm05.local
2026-03-09T15:31:10.492 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'while ceph orch upgrade status | jq '"'"'.in_progress'"'"' | grep true && ! ceph orch upgrade status | jq '"'"'.message'"'"' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; ceph health detail ; sleep 30 ; done'
2026-03-09T15:31:10.672 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:10 vm05 ceph-mon[49764]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T15:31:10.673 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:10 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y'
2026-03-09T15:31:10.673 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:10 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T15:31:10.673 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:10 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T15:31:10.673 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:10 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y'
2026-03-09T15:31:10.675 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:10 vm05 ceph-mon[54361]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s
2026-03-09T15:31:10.675 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:10 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y'
2026-03-09T15:31:10.675 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:10 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T15:31:10.675 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:10 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T15:31:10.675 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:10 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y'
2026-03-09T15:31:11.059 INFO:teuthology.orchestra.run.vm05.stdout:true
2026-03-09T15:31:11.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:10 vm09 ceph-mon[49358]: pgmap v114: 161 pgs: 161 active+clean; 457 KiB data, 97
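For readability: the shell-quoted one-liner that cephadm.shell just dispatched (the '"'"' runs are only quoting artifacts) expands to the loop below. Every command and value is taken verbatim from the command above, nothing is added:

    # Poll the orchestrator every 30 s while the upgrade is still in progress
    # and no Error message has been reported; dump cluster state on each pass.
    while ceph orch upgrade status | jq '.in_progress' | grep true && \
          ! ceph orch upgrade status | jq '.message' | grep Error ; do
        ceph orch ps
        ceph versions
        ceph orch upgrade status
        ceph health detail
        sleep 30
    done

The "true" printed to stdout immediately afterwards is the first evaluation of jq '.in_progress' | grep true, so the ceph orch ps, ceph versions, and ceph orch upgrade status output that follows is the loop's first iteration.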
MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:11.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:10 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:31:11.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:10 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:31:11.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:10 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:31:11.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:10 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (115s) 51s ago 2m 18.7M - ba2b418f427c f0e454ec34d8 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (2m) 51s ago 2m 48.8M - 8.3.5 dad864ee21e9 3727d7279dc9 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 running (117s) 51s ago 117s 45.5M - 3.5 e1d6a67b021e ed0541340202 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443 running (3m) 51s ago 3m 418M - 17.2.0 e1d6a67b021e e6ea01192c0c 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:9283 running (4m) 51s ago 4m 468M - 17.2.0 e1d6a67b021e 25a6783f54e5 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (4m) 51s ago 4m 54.6M 2048M 17.2.0 e1d6a67b021e b31ac3c66976 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (4m) 51s ago 4m 50.7M 2048M 17.2.0 e1d6a67b021e 0fedbaac50c3 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (4m) 51s ago 4m 49.8M 2048M 17.2.0 e1d6a67b021e 43bd063ebab2 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (2m) 51s ago 2m 18.9M - 1dbe0e931976 9e0bcae9a93c 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (2m) 51s ago 2m 13.7M - 1dbe0e931976 b80da7ef9167 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (3m) 51s ago 3m 51.4M 4096M 17.2.0 e1d6a67b021e b143b061d0dd 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (3m) 51s ago 3m 49.7M 4096M 17.2.0 e1d6a67b021e 2277528e9f90 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (3m) 51s ago 3m 46.4M 4096M 17.2.0 e1d6a67b021e 21b53f2cd34c 2026-03-09T15:31:11.446 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (3m) 51s ago 3m 52.1M 4096M 17.2.0 e1d6a67b021e b4398847e195 2026-03-09T15:31:11.447 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (3m) 51s ago 3m 49.4M 4096M 17.2.0 e1d6a67b021e 00685022776e 2026-03-09T15:31:11.447 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (3m) 51s ago 3m 47.0M 4096M 17.2.0 e1d6a67b021e fbdec571623e 2026-03-09T15:31:11.447 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (2m) 51s ago 2m 47.4M 4096M 17.2.0 e1d6a67b021e ad01856a3458 2026-03-09T15:31:11.447 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (2m) 51s ago 2m 48.9M 
4096M 17.2.0 e1d6a67b021e 3cc4727e5f07 2026-03-09T15:31:11.447 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (115s) 51s ago 2m 47.8M - 514e6a882f6e d95368d63cb2 2026-03-09T15:31:11.447 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (2m) 51s ago 2m 89.2M - 17.2.0 e1d6a67b021e 1d93c894d675 2026-03-09T15:31:11.447 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (2m) 51s ago 2m 89.2M - 17.2.0 e1d6a67b021e 5c4755297ad0 2026-03-09T15:31:11.447 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (2m) 51s ago 2m 90.5M - 17.2.0 e1d6a67b021e fe5812c6c74c 2026-03-09T15:31:11.447 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (2m) 51s ago 2m 87.8M - 17.2.0 e1d6a67b021e 7a967179b651 2026-03-09T15:31:11.666 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:31:11.666 INFO:teuthology.orchestra.run.vm05.stdout: "mon": { 2026-03-09T15:31:11.666 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T15:31:11.666 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:31:11.666 INFO:teuthology.orchestra.run.vm05.stdout: "mgr": { 2026-03-09T15:31:11.666 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T15:31:11.666 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:31:11.666 INFO:teuthology.orchestra.run.vm05.stdout: "osd": { 2026-03-09T15:31:11.666 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T15:31:11.666 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:31:11.666 INFO:teuthology.orchestra.run.vm05.stdout: "mds": {}, 2026-03-09T15:31:11.667 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": { 2026-03-09T15:31:11.667 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-09T15:31:11.667 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:31:11.667 INFO:teuthology.orchestra.run.vm05.stdout: "overall": { 2026-03-09T15:31:11.667 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 17 2026-03-09T15:31:11.667 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:31:11.667 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:31:11.724 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:11 vm05 ceph-mon[49764]: from='client.24751 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:11.724 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:11 vm05 ceph-mon[49764]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:31:11.724 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:11 vm05 ceph-mon[49764]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:31:11.724 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:11 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/541918830' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T15:31:11.863 INFO:teuthology.orchestra.run.vm05.stdout:{
2026-03-09T15:31:11.863 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-09T15:31:11.863 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true,
2026-03-09T15:31:11.863 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [],
2026-03-09T15:31:11.863 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "",
2026-03-09T15:31:11.863 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image"
2026-03-09T15:31:11.863 INFO:teuthology.orchestra.run.vm05.stdout:}
2026-03-09T15:31:11.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:11 vm05 ceph-mon[54361]: from='client.24751 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:31:11.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:11 vm05 ceph-mon[54361]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T15:31:11.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:11 vm05 ceph-mon[54361]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T15:31:11.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:11 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/541918830' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T15:31:12.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:11 vm09 ceph-mon[49358]: from='client.24751 -' entity='client.admin' cmd=[{"prefix": "orch upgrade start", "image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:31:12.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:11 vm09 ceph-mon[49358]: Upgrade: Started with target quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T15:31:12.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:11 vm09 ceph-mon[49358]: Upgrade: First pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df
2026-03-09T15:31:12.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:11 vm09 ceph-mon[49358]: from='client.?
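The status JSON above ("in_progress": true, "message": "Doing first pull of ... image") carries the fields the wait loop keys on. A minimal follow-up along the same lines might look like the sketch below; it is not part of the captured run and assumes only the same admin keyring and the status fields shown above:

    # Block until the orchestrator reports the upgrade as no longer in progress,
    # then print the final upgrade status and the per-daemon version map.
    until [ "$(ceph orch upgrade status | jq -r '.in_progress')" = "false" ]; do
        sleep 30
    done
    ceph orch upgrade status
    ceph versions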
192.168.123.105:0/541918830' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:31:12.078 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_OK 2026-03-09T15:31:12.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:12 vm05 ceph-mon[49764]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:12.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:12 vm05 ceph-mon[49764]: from='client.14874 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:12.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:12 vm05 ceph-mon[49764]: from='client.14880 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:12.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:12 vm05 ceph-mon[49764]: from='client.24760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:12.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:12 vm05 ceph-mon[49764]: from='client.24769 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:12.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:12 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/725498104' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:31:12.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:12 vm05 ceph-mon[54361]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:12.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:12 vm05 ceph-mon[54361]: from='client.14874 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:12.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:12 vm05 ceph-mon[54361]: from='client.14880 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:12.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:12 vm05 ceph-mon[54361]: from='client.24760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:12.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:12 vm05 ceph-mon[54361]: from='client.24769 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:12.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:12 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/725498104' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:31:13.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:12 vm09 ceph-mon[49358]: pgmap v115: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:13.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:12 vm09 ceph-mon[49358]: from='client.14874 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:13.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:12 vm09 ceph-mon[49358]: from='client.14880 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:13.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:12 vm09 ceph-mon[49358]: from='client.24760 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:13.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:12 vm09 ceph-mon[49358]: from='client.24769 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:13.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:12 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/725498104' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:31:13.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:13 vm05 ceph-mon[49764]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:13.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:31:13.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:13.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:31:13.513Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:13.515Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate 
certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:13.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:13.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:13.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:13.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:13 vm05 ceph-mon[54361]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:14.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:13 vm09 ceph-mon[49358]: pgmap v116: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:15.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:14 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:15.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:14 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:15.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:14 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:16.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:15 vm09 ceph-mon[49358]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:16.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:15 vm05 ceph-mon[49764]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:16.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:15 vm05 ceph-mon[54361]: pgmap v117: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:16.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:31:16 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:31:16] "GET /metrics HTTP/1.1" 200 214437 "" 
"Prometheus/2.33.4" 2026-03-09T15:31:18.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:18 vm09 ceph-mon[49358]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:18.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:31:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:31:18] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:31:18.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:18 vm05 ceph-mon[49764]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:18.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:18 vm05 ceph-mon[54361]: pgmap v118: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:20.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:20 vm05 ceph-mon[49764]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:20.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:20 vm05 ceph-mon[54361]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:21.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:20 vm09 ceph-mon[49358]: pgmap v119: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:22.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:22 vm05 ceph-mon[49764]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:22.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:22 vm05 ceph-mon[54361]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:23.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:22 vm09 ceph-mon[49358]: pgmap v120: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:31:23.514Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:31:23.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post 
\"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:23.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:23.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:23.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:23.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:24.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:24 vm09 ceph-mon[49358]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:24.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:24 vm05 ceph-mon[54361]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:24.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:24 vm05 ceph-mon[49764]: pgmap v121: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:25.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:25 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:25.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:25 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:25.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:25 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:26.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 
15:31:26 vm09 ceph-mon[49358]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:26.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:26 vm05 ceph-mon[49764]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:26.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:26 vm05 ceph-mon[54361]: pgmap v122: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:26.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:31:26 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:31:26] "GET /metrics HTTP/1.1" 200 214445 "" "Prometheus/2.33.4" 2026-03-09T15:31:28.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:28 vm09 ceph-mon[49358]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:28.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:31:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:31:28] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:31:28.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:28 vm05 ceph-mon[49764]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:28.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:28 vm05 ceph-mon[54361]: pgmap v123: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:30.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:30 vm05 ceph-mon[49764]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:30.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:30 vm05 ceph-mon[54361]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:31.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:30 vm09 ceph-mon[49358]: pgmap v124: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:32.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:32 vm05 ceph-mon[49764]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:32.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:32 vm05 ceph-mon[54361]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:33.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:32 vm09 ceph-mon[49358]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:33.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:31:33.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post 
\"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:33.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:31:33.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:33.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:33.516Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:33.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:33.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:33.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:33.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:33.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:33.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:34.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:34 vm05 ceph-mon[49764]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:34.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:34 vm05 ceph-mon[54361]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:35.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:34 vm09 ceph-mon[49358]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-09T15:31:35.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:35 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:35.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:31:35.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:31:35.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:31:35.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:31:35.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:35 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:35.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:31:35.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:31:35.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:31:35.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:31:36.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:35 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:36.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:31:36.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:31:36.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:31:36.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:35 vm09 ceph-mon[49358]: from='mgr.24307 ' 
entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:31:36.692 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:31:36 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:31:36] "GET /metrics HTTP/1.1" 200 214411 "" "Prometheus/2.33.4" 2026-03-09T15:31:36.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:36 vm05 ceph-mon[49764]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:36.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:36 vm05 ceph-mon[54361]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:37.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:36 vm09 ceph-mon[49358]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:38.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:31:38 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:31:38] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:31:38.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:38 vm09 ceph-mon[49358]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:38.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:38 vm05 ceph-mon[49764]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:38.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:38 vm05 ceph-mon[54361]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:40.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:40 vm05 ceph-mon[49764]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:40.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:40 vm05 ceph-mon[54361]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:41.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:40 vm09 ceph-mon[49358]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:42.300 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:31:42.660 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:42 vm05 ceph-mon[49764]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (2m) 82s ago 2m 18.7M - ba2b418f427c f0e454ec34d8 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (2m) 82s ago 2m 48.8M - 8.3.5 dad864ee21e9 3727d7279dc9 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 running (2m) 82s ago 2m 45.5M - 3.5 e1d6a67b021e ed0541340202 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443 running (4m) 82s ago 4m 418M - 17.2.0 e1d6a67b021e 
e6ea01192c0c 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:9283 running (5m) 82s ago 5m 468M - 17.2.0 e1d6a67b021e 25a6783f54e5 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (5m) 82s ago 5m 54.6M 2048M 17.2.0 e1d6a67b021e b31ac3c66976 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (4m) 82s ago 4m 50.7M 2048M 17.2.0 e1d6a67b021e 0fedbaac50c3 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (4m) 82s ago 4m 49.8M 2048M 17.2.0 e1d6a67b021e 43bd063ebab2 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (3m) 82s ago 3m 18.9M - 1dbe0e931976 9e0bcae9a93c 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (3m) 82s ago 3m 13.7M - 1dbe0e931976 b80da7ef9167 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (4m) 82s ago 4m 51.4M 4096M 17.2.0 e1d6a67b021e b143b061d0dd 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (4m) 82s ago 4m 49.7M 4096M 17.2.0 e1d6a67b021e 2277528e9f90 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (4m) 82s ago 4m 46.4M 4096M 17.2.0 e1d6a67b021e 21b53f2cd34c 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (3m) 82s ago 3m 52.1M 4096M 17.2.0 e1d6a67b021e b4398847e195 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (3m) 82s ago 3m 49.4M 4096M 17.2.0 e1d6a67b021e 00685022776e 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (3m) 82s ago 3m 47.0M 4096M 17.2.0 e1d6a67b021e fbdec571623e 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (3m) 82s ago 3m 47.4M 4096M 17.2.0 e1d6a67b021e ad01856a3458 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (3m) 82s ago 3m 48.9M 4096M 17.2.0 e1d6a67b021e 3cc4727e5f07 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (2m) 82s ago 2m 47.8M - 514e6a882f6e d95368d63cb2 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (2m) 82s ago 2m 89.2M - 17.2.0 e1d6a67b021e 1d93c894d675 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (2m) 82s ago 2m 89.2M - 17.2.0 e1d6a67b021e 5c4755297ad0 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (2m) 82s ago 2m 90.5M - 17.2.0 e1d6a67b021e fe5812c6c74c 2026-03-09T15:31:42.695 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (2m) 82s ago 2m 87.8M - 17.2.0 e1d6a67b021e 7a967179b651 2026-03-09T15:31:42.923 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:42 vm05 ceph-mon[54361]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:42.924 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:31:42.924 INFO:teuthology.orchestra.run.vm05.stdout: "mon": { 2026-03-09T15:31:42.924 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T15:31:42.924 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:31:42.924 INFO:teuthology.orchestra.run.vm05.stdout: "mgr": { 2026-03-09T15:31:42.925 
INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T15:31:42.925 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:31:42.925 INFO:teuthology.orchestra.run.vm05.stdout: "osd": { 2026-03-09T15:31:42.925 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T15:31:42.925 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:31:42.925 INFO:teuthology.orchestra.run.vm05.stdout: "mds": {}, 2026-03-09T15:31:42.925 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": { 2026-03-09T15:31:42.925 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-09T15:31:42.925 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:31:42.925 INFO:teuthology.orchestra.run.vm05.stdout: "overall": { 2026-03-09T15:31:42.925 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 17 2026-03-09T15:31:42.925 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:31:42.925 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:31:43.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:42 vm09 ceph-mon[49358]: pgmap v130: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:43.113 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:31:43.113 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T15:31:43.114 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true, 2026-03-09T15:31:43.114 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [], 2026-03-09T15:31:43.114 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "", 2026-03-09T15:31:43.114 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image" 2026-03-09T15:31:43.114 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:31:43.325 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_OK 2026-03-09T15:31:43.667 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:43 vm05 ceph-mon[49764]: from='client.24779 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:43.667 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:43 vm05 ceph-mon[49764]: from='client.14904 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:43.667 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:43 vm05 ceph-mon[49764]: from='client.14910 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:43.667 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:43 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2298524526' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:31:43.667 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:43 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/1509834655' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:31:43.668 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:31:43.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:43.668 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:31:43.515Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:43.668 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:43.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:43.668 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:43.517Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:43.668 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:43.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:43.668 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:43.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 
192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:43.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:43 vm05 ceph-mon[54361]: from='client.24779 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:43.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:43 vm05 ceph-mon[54361]: from='client.14904 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:43.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:43 vm05 ceph-mon[54361]: from='client.14910 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:43.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:43 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2298524526' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:31:43.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:43 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1509834655' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:31:44.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:43 vm09 ceph-mon[49358]: from='client.24779 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:44.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:43 vm09 ceph-mon[49358]: from='client.14904 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:44.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:43 vm09 ceph-mon[49358]: from='client.14910 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:44.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:43 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2298524526' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:31:44.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:43 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/1509834655' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:31:44.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:44 vm05 ceph-mon[49764]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:44.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:44 vm05 ceph-mon[49764]: from='client.14922 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:44.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:44 vm05 ceph-mon[54361]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:44.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:44 vm05 ceph-mon[54361]: from='client.14922 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:45.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:44 vm09 ceph-mon[49358]: pgmap v131: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:45.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:44 vm09 ceph-mon[49358]: from='client.14922 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:31:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:45 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:45 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:45 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:46.678 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:31:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:31:46] "GET /metrics HTTP/1.1" 200 214411 "" "Prometheus/2.33.4" 2026-03-09T15:31:46.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:46 vm05 ceph-mon[49764]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:46.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:46 vm05 ceph-mon[54361]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:47.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:46 vm09 ceph-mon[49358]: pgmap v132: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:48.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:31:48 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:31:48] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:31:48.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:48 vm09 ceph-mon[49358]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:48.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:48 vm05 ceph-mon[49764]: pgmap v133: 161 pgs: 161 
active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:48.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:48 vm05 ceph-mon[54361]: pgmap v133: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:49.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:49 vm05 ceph-mon[49764]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:49.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:49 vm05 ceph-mon[54361]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:50.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:49 vm09 ceph-mon[49358]: pgmap v134: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:52.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:52 vm05 ceph-mon[49764]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:52.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:52 vm05 ceph-mon[54361]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:53.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:52 vm09 ceph-mon[49358]: pgmap v135: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:53.883 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:31:53.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:53.883 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:31:53.516Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:53.883 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:53.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP 
SANs" 2026-03-09T15:31:53.883 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:53.518Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:53.883 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:53.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:31:53.883 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:31:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:31:53.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:31:54.189 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:53 vm05 ceph-mon[49764]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:54.189 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:53 vm05 ceph-mon[54361]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:54.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:53 vm09 ceph-mon[49358]: pgmap v136: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:55.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:54 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:55.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:54 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:55.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:54 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:31:56.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:55 vm05 ceph-mon[49764]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:56.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:55 vm05 ceph-mon[54361]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:56.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:55 vm09 ceph-mon[49358]: pgmap v137: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:31:56.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:31:56 vm05 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:31:56] "GET /metrics HTTP/1.1" 200 214376 "" "Prometheus/2.33.4" 2026-03-09T15:31:58.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:31:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:31:58] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:31:58.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:31:58 vm09 ceph-mon[49358]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:58.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:31:58 vm05 ceph-mon[49764]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:31:58.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:31:58 vm05 ceph-mon[54361]: pgmap v138: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:00 vm05 ceph-mon[49764]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:00 vm05 ceph-mon[54361]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:01.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:00 vm09 ceph-mon[49358]: pgmap v139: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:02.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:02 vm05 ceph-mon[49764]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:02.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:02 vm05 ceph-mon[54361]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:03.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:02 vm09 ceph-mon[49358]: pgmap v140: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:03.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:32:03.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:03.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:32:03.518Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't 
contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:03.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:03.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:03.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:03.520Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:03.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:03.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:03.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:03.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:04.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:04 vm05 ceph-mon[49764]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:04.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:04 vm05 ceph-mon[54361]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:05.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:04 vm09 ceph-mon[49358]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:05.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:05 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:05.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:05 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:06.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:05 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": 
"json"}]: dispatch 2026-03-09T15:32:06.697 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:06 vm05 ceph-mon[49764]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:06.697 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:32:06 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:32:06] "GET /metrics HTTP/1.1" 200 214383 "" "Prometheus/2.33.4" 2026-03-09T15:32:06.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:06 vm05 ceph-mon[54361]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:07.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:06 vm09 ceph-mon[49358]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:08.812 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:32:08 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:32:08] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:32:08.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:08 vm09 ceph-mon[49358]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:08.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:08 vm05 ceph-mon[49764]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:08 vm05 ceph-mon[54361]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:10.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:09 vm09 ceph-mon[49358]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:10.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:09 vm05 ceph-mon[49764]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:10.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:09 vm05 ceph-mon[54361]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:12.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:12 vm05 ceph-mon[49764]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:12.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:12 vm05 ceph-mon[54361]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:12.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:12 vm09 ceph-mon[49358]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:13.561 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (2m) 113s ago 3m 18.7M - ba2b418f427c f0e454ec34d8 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (3m) 113s ago 3m 48.8M - 
8.3.5 dad864ee21e9 3727d7279dc9 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 running (3m) 113s ago 3m 45.5M - 3.5 e1d6a67b021e ed0541340202 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443 running (4m) 113s ago 4m 418M - 17.2.0 e1d6a67b021e e6ea01192c0c 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:9283 running (5m) 113s ago 5m 468M - 17.2.0 e1d6a67b021e 25a6783f54e5 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (5m) 113s ago 5m 54.6M 2048M 17.2.0 e1d6a67b021e b31ac3c66976 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (5m) 113s ago 5m 50.7M 2048M 17.2.0 e1d6a67b021e 0fedbaac50c3 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (5m) 113s ago 5m 49.8M 2048M 17.2.0 e1d6a67b021e 43bd063ebab2 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (3m) 113s ago 3m 18.9M - 1dbe0e931976 9e0bcae9a93c 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (3m) 113s ago 3m 13.7M - 1dbe0e931976 b80da7ef9167 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (4m) 113s ago 4m 51.4M 4096M 17.2.0 e1d6a67b021e b143b061d0dd 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (4m) 113s ago 4m 49.7M 4096M 17.2.0 e1d6a67b021e 2277528e9f90 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (4m) 113s ago 4m 46.4M 4096M 17.2.0 e1d6a67b021e 21b53f2cd34c 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (4m) 113s ago 4m 52.1M 4096M 17.2.0 e1d6a67b021e b4398847e195 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (4m) 113s ago 4m 49.4M 4096M 17.2.0 e1d6a67b021e 00685022776e 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (4m) 113s ago 4m 47.0M 4096M 17.2.0 e1d6a67b021e fbdec571623e 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (3m) 113s ago 3m 47.4M 4096M 17.2.0 e1d6a67b021e ad01856a3458 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (3m) 113s ago 3m 48.9M 4096M 17.2.0 e1d6a67b021e 3cc4727e5f07 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (2m) 113s ago 3m 47.8M - 514e6a882f6e d95368d63cb2 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (3m) 113s ago 3m 89.2M - 17.2.0 e1d6a67b021e 1d93c894d675 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (3m) 113s ago 3m 89.2M - 17.2.0 e1d6a67b021e 5c4755297ad0 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (3m) 113s ago 3m 90.5M - 17.2.0 e1d6a67b021e fe5812c6c74c 2026-03-09T15:32:13.986 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (3m) 113s ago 3m 87.8M - 17.2.0 e1d6a67b021e 7a967179b651 2026-03-09T15:32:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:32:13.517Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled 
after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:32:13.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:13.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:13.519Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:13.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:13.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:14.265 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:32:14.265 INFO:teuthology.orchestra.run.vm05.stdout: "mon": { 2026-03-09T15:32:14.265 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T15:32:14.265 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:32:14.265 
INFO:teuthology.orchestra.run.vm05.stdout: "mgr": { 2026-03-09T15:32:14.265 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T15:32:14.265 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:32:14.265 INFO:teuthology.orchestra.run.vm05.stdout: "osd": { 2026-03-09T15:32:14.266 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T15:32:14.266 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:32:14.266 INFO:teuthology.orchestra.run.vm05.stdout: "mds": {}, 2026-03-09T15:32:14.266 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": { 2026-03-09T15:32:14.266 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-09T15:32:14.266 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:32:14.266 INFO:teuthology.orchestra.run.vm05.stdout: "overall": { 2026-03-09T15:32:14.266 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 17 2026-03-09T15:32:14.266 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:32:14.266 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:32:14.482 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:32:14.482 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T15:32:14.482 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true, 2026-03-09T15:32:14.482 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [], 2026-03-09T15:32:14.482 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "", 2026-03-09T15:32:14.482 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Doing first pull of quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image" 2026-03-09T15:32:14.482 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:32:14.961 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_OK 2026-03-09T15:32:15.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:14 vm09 ceph-mon[49358]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:15.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:14 vm09 ceph-mon[49358]: from='client.24808 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:15.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:14 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2873284894' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:32:15.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:14 vm05 ceph-mon[49764]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:15.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:14 vm05 ceph-mon[49764]: from='client.24808 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:15.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:14 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/2873284894' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:32:15.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:14 vm05 ceph-mon[54361]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:15.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:14 vm05 ceph-mon[54361]: from='client.24808 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:15.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:14 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2873284894' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:32:16.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:16 vm09 ceph-mon[49358]: from='client.14934 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:16.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:16 vm09 ceph-mon[49358]: from='client.24814 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:16.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:16 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:16.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:16 vm09 ceph-mon[49358]: from='client.24820 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:16.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:16 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2972551140' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:32:16.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:16 vm09 ceph-mon[49358]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:16.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:16 vm05 ceph-mon[49764]: from='client.14934 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:16.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:16 vm05 ceph-mon[49764]: from='client.24814 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:16.394 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:16 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:16.394 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:16 vm05 ceph-mon[49764]: from='client.24820 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:16.394 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:16 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/2972551140' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:32:16.394 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:16 vm05 ceph-mon[49764]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:16.394 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:16 vm05 ceph-mon[54361]: from='client.14934 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:16.394 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:16 vm05 ceph-mon[54361]: from='client.24814 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:16.394 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:16 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:16.394 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:16 vm05 ceph-mon[54361]: from='client.24820 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:16.394 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:16 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2972551140' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:32:16.394 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:16 vm05 ceph-mon[54361]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:16.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:32:16 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:32:16] "GET /metrics HTTP/1.1" 200 214383 "" "Prometheus/2.33.4" 2026-03-09T15:32:17.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:17 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:32:17.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:17 vm09 ceph-mon[49358]: Upgrade: Target is version 19.2.3-678-ge911bdeb (unknown) 2026-03-09T15:32:17.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:17 vm09 ceph-mon[49358]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T15:32:17.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:17 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:32:17.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:17 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:32:17.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:17 vm09 ceph-mon[49358]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-09T15:32:17.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:17 vm09 ceph-mon[49358]: Upgrade: Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on vm09 2026-03-09T15:32:17.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:17 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:32:17.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:17 vm05 ceph-mon[49764]: Upgrade: Target is version 19.2.3-678-ge911bdeb (unknown) 2026-03-09T15:32:17.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:17 vm05 
ceph-mon[49764]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T15:32:17.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:17 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:32:17.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:17 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:32:17.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:17 vm05 ceph-mon[49764]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-09T15:32:17.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:17 vm05 ceph-mon[49764]: Upgrade: Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on vm09 2026-03-09T15:32:17.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:17 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:32:17.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:17 vm05 ceph-mon[54361]: Upgrade: Target is version 19.2.3-678-ge911bdeb (unknown) 2026-03-09T15:32:17.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:17 vm05 ceph-mon[54361]: Upgrade: Target container is quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, digests ['quay.ceph.io/ceph-ci/ceph@sha256:8fda260ab1d2d3118a1622f7df75f44f285dfe74e71793626152a711c12bf2cc'] 2026-03-09T15:32:17.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:17 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:32:17.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:17 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:32:17.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:17 vm05 ceph-mon[54361]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-09T15:32:17.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:17 vm05 ceph-mon[54361]: Upgrade: Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df on vm09 2026-03-09T15:32:18.557 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:18 vm09 ceph-mon[49358]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:18.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:18 vm05 ceph-mon[49764]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:18.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:18 vm05 ceph-mon[54361]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:18.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:32:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:32:18] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:32:21.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:20 vm05 ceph-mon[49764]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:21.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:20 vm05 ceph-mon[54361]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:21.312 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:20 vm09 ceph-mon[49358]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:22.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:21 vm05 ceph-mon[49764]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:22.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:21 vm05 ceph-mon[54361]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:22.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:21 vm09 ceph-mon[49358]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:32:23.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:32:23.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:23.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:23.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:23.522Z caller=notify.go:724 
component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:23.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:24.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:24 vm05 ceph-mon[49764]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:24.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:24 vm05 ceph-mon[54361]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:24.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:24 vm09 ceph-mon[49358]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:25.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:25 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:25.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:25 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:25.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:25 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:26.393 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:26 vm05 ceph-mon[49764]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:26.393 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:26 vm05 ceph-mon[54361]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:26.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:26 vm09 ceph-mon[49358]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:26.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:32:26 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:32:26] "GET /metrics HTTP/1.1" 200 214382 "" "Prometheus/2.33.4" 2026-03-09T15:32:28.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:27 vm05 ceph-mon[49764]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:28.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:27 vm05 ceph-mon[54361]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:28.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:27 vm09 ceph-mon[49358]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB 
data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:28.812 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:32:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:32:28] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:32:30.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:30 vm05 ceph-mon[49764]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:30.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:30 vm05 ceph-mon[54361]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:30.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:30 vm09 ceph-mon[49358]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:32.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:32 vm05 ceph-mon[49764]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:32.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:32 vm05 ceph-mon[54361]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:32.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:32 vm09 ceph-mon[49358]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:33.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:32:33.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:32:33.519Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:33.521Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain 
any IP SANs" 2026-03-09T15:32:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:33.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:33.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:33.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:34.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:34 vm05 ceph-mon[49764]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:34.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:34 vm05 ceph-mon[54361]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:34.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:34 vm09 ceph-mon[49358]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:35.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:35 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:35.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:32:35.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:32:35.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:32:35.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:32:35.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:35 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' 
cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:35.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:32:35.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:32:35.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:32:35.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:32:35.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:35 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:35.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:32:35.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:32:35.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:32:35.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:32:36.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:36 vm05 ceph-mon[49764]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:36.485 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:32:36 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:32:36] "GET /metrics HTTP/1.1" 200 214385 "" "Prometheus/2.33.4" 2026-03-09T15:32:36.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:36 vm05 ceph-mon[54361]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:36.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:36 vm09 ceph-mon[49358]: pgmap v157: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:38.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:37 vm05 ceph-mon[49764]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:38.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:37 vm05 ceph-mon[54361]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 
GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:38.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:37 vm09 ceph-mon[49358]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:38.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:32:38 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:32:38] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:32:40.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:40 vm05 ceph-mon[49764]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:40.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:40 vm05 ceph-mon[54361]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:40.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:40 vm09 ceph-mon[49358]: pgmap v159: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:42 vm05 ceph-mon[49764]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:42 vm05 ceph-mon[54361]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:42.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:42 vm09 ceph-mon[49358]: pgmap v160: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:32:43.520Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:32:43.520Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:43.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, 
will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:43.522Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:43.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:43.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:44.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:44 vm05 ceph-mon[49764]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:44.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:44 vm05 ceph-mon[54361]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:44.561 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:44 vm09 ceph-mon[49358]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:45.188 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:32:45.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:45 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:45.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:45 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:45.561 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T15:32:45.561 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (3m) 2m ago 3m 18.7M - ba2b418f427c f0e454ec34d8 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (3m) 2m ago 3m 48.8M - 8.3.5 dad864ee21e9 3727d7279dc9 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 running (3m) 2m ago 3m 45.5M - 3.5 e1d6a67b021e ed0541340202 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443 
running (5m) 2m ago 5m 418M - 17.2.0 e1d6a67b021e e6ea01192c0c 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:9283 running (6m) 2m ago 6m 468M - 17.2.0 e1d6a67b021e 25a6783f54e5 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (6m) 2m ago 6m 54.6M 2048M 17.2.0 e1d6a67b021e b31ac3c66976 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (5m) 2m ago 5m 50.7M 2048M 17.2.0 e1d6a67b021e 0fedbaac50c3 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (5m) 2m ago 5m 49.8M 2048M 17.2.0 e1d6a67b021e 43bd063ebab2 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (4m) 2m ago 4m 18.9M - 1dbe0e931976 9e0bcae9a93c 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (4m) 2m ago 4m 13.7M - 1dbe0e931976 b80da7ef9167 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (5m) 2m ago 5m 51.4M 4096M 17.2.0 e1d6a67b021e b143b061d0dd 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (5m) 2m ago 5m 49.7M 4096M 17.2.0 e1d6a67b021e 2277528e9f90 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (5m) 2m ago 5m 46.4M 4096M 17.2.0 e1d6a67b021e 21b53f2cd34c 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (4m) 2m ago 4m 52.1M 4096M 17.2.0 e1d6a67b021e b4398847e195 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (4m) 2m ago 4m 49.4M 4096M 17.2.0 e1d6a67b021e 00685022776e 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (4m) 2m ago 4m 47.0M 4096M 17.2.0 e1d6a67b021e fbdec571623e 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (4m) 2m ago 4m 47.4M 4096M 17.2.0 e1d6a67b021e ad01856a3458 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (4m) 2m ago 4m 48.9M 4096M 17.2.0 e1d6a67b021e 3cc4727e5f07 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (3m) 2m ago 3m 47.8M - 514e6a882f6e d95368d63cb2 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (3m) 2m ago 3m 89.2M - 17.2.0 e1d6a67b021e 1d93c894d675 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (3m) 2m ago 3m 89.2M - 17.2.0 e1d6a67b021e 5c4755297ad0 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (3m) 2m ago 3m 90.5M - 17.2.0 e1d6a67b021e fe5812c6c74c 2026-03-09T15:32:45.562 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (3m) 2m ago 3m 87.8M - 17.2.0 e1d6a67b021e 7a967179b651 2026-03-09T15:32:45.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:45 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:45.786 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:32:45.786 INFO:teuthology.orchestra.run.vm05.stdout: "mon": { 2026-03-09T15:32:45.786 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T15:32:45.786 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:32:45.786 
INFO:teuthology.orchestra.run.vm05.stdout: "mgr": { 2026-03-09T15:32:45.786 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T15:32:45.786 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:32:45.787 INFO:teuthology.orchestra.run.vm05.stdout: "osd": { 2026-03-09T15:32:45.787 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T15:32:45.787 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:32:45.787 INFO:teuthology.orchestra.run.vm05.stdout: "mds": {}, 2026-03-09T15:32:45.787 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": { 2026-03-09T15:32:45.787 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-09T15:32:45.787 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:32:45.787 INFO:teuthology.orchestra.run.vm05.stdout: "overall": { 2026-03-09T15:32:45.787 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 17 2026-03-09T15:32:45.787 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:32:45.787 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:32:45.995 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:32:45.995 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T15:32:45.995 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true, 2026-03-09T15:32:45.995 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [], 2026-03-09T15:32:45.995 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "0/23 daemons upgraded", 2026-03-09T15:32:45.995 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image on host vm09" 2026-03-09T15:32:45.995 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:32:46.229 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_OK 2026-03-09T15:32:46.394 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:32:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:32:46] "GET /metrics HTTP/1.1" 200 214385 "" "Prometheus/2.33.4" 2026-03-09T15:32:46.394 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:46 vm05 ceph-mon[54361]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:46.394 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:46 vm05 ceph-mon[54361]: from='client.24832 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:46.394 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:46 vm05 ceph-mon[54361]: from='client.14952 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:46.395 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:46 vm05 ceph-mon[54361]: from='client.14958 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:46.395 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:46 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/2831033825' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:32:46.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:46 vm05 ceph-mon[49764]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:46.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:46 vm05 ceph-mon[49764]: from='client.24832 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:46.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:46 vm05 ceph-mon[49764]: from='client.14952 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:46.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:46 vm05 ceph-mon[49764]: from='client.14958 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:46.395 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:46 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2831033825' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:32:46.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:46 vm09 ceph-mon[49358]: pgmap v162: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:46.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:46 vm09 ceph-mon[49358]: from='client.24832 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:46.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:46 vm09 ceph-mon[49358]: from='client.14952 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:46.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:46 vm09 ceph-mon[49358]: from='client.14958 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:46.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:46 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2831033825' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:32:47.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:47 vm09 ceph-mon[49358]: from='client.14970 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:47.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:47 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1142947371' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:32:47.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:47 vm05 ceph-mon[49764]: from='client.14970 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:47.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:47 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1142947371' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:32:47.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:47 vm05 ceph-mon[54361]: from='client.14970 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:32:47.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:47 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/1142947371' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:32:48.812 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:32:48 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:32:48] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:32:48.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:48 vm09 ceph-mon[49358]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:48.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:48 vm05 ceph-mon[49764]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:48.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:48 vm05 ceph-mon[54361]: pgmap v163: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:50.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:50 vm05 ceph-mon[49764]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:50.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:50 vm05 ceph-mon[54361]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:50 vm09 ceph-mon[49358]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:52.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:52 vm05 ceph-mon[49764]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:52.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:52 vm05 ceph-mon[54361]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:53.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:52 vm09 ceph-mon[49358]: pgmap v165: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:53.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:32:53.520Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:53.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:32:53.521Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify 
retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:53.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:53.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:53.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:53.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:53.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:53.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:32:53.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:32:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:32:53.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:32:54.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:54 vm05 ceph-mon[49764]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:54.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:54 vm05 ceph-mon[54361]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:55.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:54 vm09 ceph-mon[49358]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:55.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:55 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:55.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:55 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:56.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:55 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:32:56.735 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:56 vm05 ceph-mon[49764]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:56.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:32:56 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:32:56] "GET /metrics HTTP/1.1" 200 214381 "" "Prometheus/2.33.4" 2026-03-09T15:32:56.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:56 vm05 ceph-mon[54361]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:57.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:56 vm09 ceph-mon[49358]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:32:58.811 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:32:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:32:58] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:32:58.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:32:58 vm09 ceph-mon[49358]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:58.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:32:58 vm05 ceph-mon[49764]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:32:58.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:32:58 vm05 ceph-mon[54361]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:00.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:00 vm05 ceph-mon[49764]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:00.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:00 vm05 ceph-mon[54361]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:01.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:00 vm09 ceph-mon[49358]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:02.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:02 vm05 ceph-mon[49764]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:02.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:02 vm05 ceph-mon[54361]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:03.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:02 vm09 ceph-mon[49358]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:33:03.521Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: 
notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:33:03.521Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:03.523Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:03.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:03.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:03.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:03.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:03.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:03.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:04.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:04 vm05 ceph-mon[49764]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:04 vm05 ceph-mon[54361]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:05.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:04 vm09 ceph-mon[49358]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 
GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:05.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:05 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:05 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:06.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:05 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:06.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:06 vm05 ceph-mon[49764]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:06.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:06 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:33:06] "GET /metrics HTTP/1.1" 200 214395 "" "Prometheus/2.33.4" 2026-03-09T15:33:06.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:06 vm05 ceph-mon[54361]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:07.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:06 vm09 ceph-mon[49358]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:08.811 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:08 vm09 ceph-mon[49358]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:08.812 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:08 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:33:08] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:33:08.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:08 vm05 ceph-mon[49764]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:08.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:08 vm05 ceph-mon[54361]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:10.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:10 vm05 ceph-mon[49764]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:10.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:10 vm05 ceph-mon[54361]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:11.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:10 vm09 ceph-mon[49358]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:12.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:12 vm05 ceph-mon[49764]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:12.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:12 vm05 ceph-mon[54361]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 
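The recurring Alertmanager "Notify for alerts failed" / "Notify attempt failed, will retry later" entries in this log all trace back to the same cause: the ceph-dashboard webhook receiver is served over TLS with a certificate that carries no IP Subject Alternative Names, so Alertmanager refuses to validate it when posting to https://192.168.123.105:8443/api/prometheus_receiver and https://192.168.123.109:8443/api/prometheus_receiver. A quick way to confirm which SANs (if any) such a certificate actually carries is an openssl probe along the lines below; the host and port are taken from the error text, everything else is a generic sketch rather than something captured in this run:

    # Fetch the certificate the dashboard endpoint presents and print its SAN section, if present.
    openssl s_client -connect 192.168.123.105:8443 </dev/null 2>/dev/null \
      | openssl x509 -noout -text \
      | grep -A1 'Subject Alternative Name'

If the grep prints nothing, the certificate has no SAN entries at all, which would match the x509 errors logged here.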
2026-03-09T15:33:13.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:12 vm09 ceph-mon[49358]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:33:13.521Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:33:13.521Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:13.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:13.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:13.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:13.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 
err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:14.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:14 vm05 ceph-mon[49764]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:14.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:14 vm05 ceph-mon[54361]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:15.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:14 vm09 ceph-mon[49358]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:15.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:15 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:15.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:15 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:16.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:15 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:16.470 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:33:16.667 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:16 vm05 ceph-mon[49764]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:16.668 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:16 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:33:16] "GET /metrics HTTP/1.1" 200 214395 "" "Prometheus/2.33.4" 2026-03-09T15:33:16.917 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T15:33:16.917 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (4m) 2m ago 4m 18.7M - ba2b418f427c f0e454ec34d8 2026-03-09T15:33:16.917 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (4m) 2m ago 4m 48.8M - 8.3.5 dad864ee21e9 3727d7279dc9 2026-03-09T15:33:16.917 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 running (4m) 2m ago 4m 45.5M - 3.5 e1d6a67b021e ed0541340202 2026-03-09T15:33:16.917 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443 running (6m) 2m ago 6m 418M - 17.2.0 e1d6a67b021e e6ea01192c0c 2026-03-09T15:33:16.917 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:9283 running (6m) 2m ago 6m 468M - 17.2.0 e1d6a67b021e 25a6783f54e5 2026-03-09T15:33:16.917 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (6m) 2m ago 6m 54.6M 2048M 17.2.0 e1d6a67b021e b31ac3c66976 2026-03-09T15:33:16.917 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (6m) 2m ago 6m 50.7M 2048M 17.2.0 e1d6a67b021e 0fedbaac50c3 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (6m) 2m ago 6m 49.8M 2048M 17.2.0 e1d6a67b021e 43bd063ebab2 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (4m) 2m ago 4m 18.9M - 1dbe0e931976 9e0bcae9a93c 2026-03-09T15:33:16.918 
INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (4m) 2m ago 4m 13.7M - 1dbe0e931976 b80da7ef9167 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (5m) 2m ago 5m 51.4M 4096M 17.2.0 e1d6a67b021e b143b061d0dd 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (5m) 2m ago 5m 49.7M 4096M 17.2.0 e1d6a67b021e 2277528e9f90 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (5m) 2m ago 5m 46.4M 4096M 17.2.0 e1d6a67b021e 21b53f2cd34c 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (5m) 2m ago 5m 52.1M 4096M 17.2.0 e1d6a67b021e b4398847e195 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (5m) 2m ago 5m 49.4M 4096M 17.2.0 e1d6a67b021e 00685022776e 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (5m) 2m ago 5m 47.0M 4096M 17.2.0 e1d6a67b021e fbdec571623e 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (5m) 2m ago 5m 47.4M 4096M 17.2.0 e1d6a67b021e ad01856a3458 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (4m) 2m ago 4m 48.9M 4096M 17.2.0 e1d6a67b021e 3cc4727e5f07 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (4m) 2m ago 4m 47.8M - 514e6a882f6e d95368d63cb2 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (4m) 2m ago 4m 89.2M - 17.2.0 e1d6a67b021e 1d93c894d675 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (4m) 2m ago 4m 89.2M - 17.2.0 e1d6a67b021e 5c4755297ad0 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (4m) 2m ago 4m 90.5M - 17.2.0 e1d6a67b021e fe5812c6c74c 2026-03-09T15:33:16.918 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (4m) 2m ago 4m 87.8M - 17.2.0 e1d6a67b021e 7a967179b651 2026-03-09T15:33:16.918 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:16 vm05 ceph-mon[54361]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:16 vm09 ceph-mon[49358]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:17.182 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:33:17.182 INFO:teuthology.orchestra.run.vm05.stdout: "mon": { 2026-03-09T15:33:17.182 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T15:33:17.182 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:33:17.182 INFO:teuthology.orchestra.run.vm05.stdout: "mgr": { 2026-03-09T15:33:17.183 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2 2026-03-09T15:33:17.183 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:33:17.183 INFO:teuthology.orchestra.run.vm05.stdout: "osd": { 2026-03-09T15:33:17.183 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T15:33:17.183 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:33:17.183 INFO:teuthology.orchestra.run.vm05.stdout: "mds": {}, 
2026-03-09T15:33:17.183 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": { 2026-03-09T15:33:17.183 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-09T15:33:17.183 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:33:17.183 INFO:teuthology.orchestra.run.vm05.stdout: "overall": { 2026-03-09T15:33:17.183 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 17 2026-03-09T15:33:17.183 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:33:17.183 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:33:17.404 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:33:17.404 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T15:33:17.404 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true, 2026-03-09T15:33:17.404 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [], 2026-03-09T15:33:17.404 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "0/23 daemons upgraded", 2026-03-09T15:33:17.404 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Pulling quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df image on host vm09" 2026-03-09T15:33:17.404 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:33:17.641 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_OK 2026-03-09T15:33:17.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:17 vm05 ceph-mon[49764]: from='client.24856 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:17.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:17 vm05 ceph-mon[49764]: from='client.24862 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:17.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:17 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2639132534' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:33:17.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:17 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/4014376159' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:33:17.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:17 vm05 ceph-mon[54361]: from='client.24856 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:17.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:17 vm05 ceph-mon[54361]: from='client.24862 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:17.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:17 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2639132534' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:33:17.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:17 vm05 ceph-mon[54361]: from='client.? 
192.168.123.105:0/4014376159' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:33:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:17 vm09 ceph-mon[49358]: from='client.24856 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:17 vm09 ceph-mon[49358]: from='client.24862 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:17 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2639132534' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:33:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:17 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/4014376159' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:33:18.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:18 vm09 ceph-mon[49358]: from='client.24868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:18.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:18 vm09 ceph-mon[49358]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:18.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:18 vm09 ceph-mon[49358]: from='client.15003 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:18.812 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:33:18] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:33:18.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:18 vm05 ceph-mon[49764]: from='client.24868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:18.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:18 vm05 ceph-mon[49764]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:18.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:18 vm05 ceph-mon[49764]: from='client.15003 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:18.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:18 vm05 ceph-mon[54361]: from='client.24868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:18.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:18 vm05 ceph-mon[54361]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:18.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:18 vm05 ceph-mon[54361]: from='client.15003 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:21.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:20 vm05 ceph-mon[49764]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:21.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:20 vm05 ceph-mon[54361]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 
GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:21.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:20 vm09 ceph-mon[49358]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:22.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:21 vm05 ceph-mon[49764]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:22.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:21 vm05 ceph-mon[54361]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:22.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:21 vm09 ceph-mon[49358]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:33:23.522Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:33:23.522Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:23.524Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:23.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn 
ts=2026-03-09T15:33:23.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:23.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:24.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:24 vm09 ceph-mon[49358]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:24.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:24 vm05 ceph-mon[49764]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:24.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:24 vm05 ceph-mon[54361]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:25.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:25 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:25.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:25 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:25.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:25 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:26.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:26 vm09 ceph-mon[49358]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:26.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:26 vm05 ceph-mon[49764]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:26.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:26 vm05 ceph-mon[54361]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:26.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:26 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:33:26] "GET /metrics HTTP/1.1" 200 214390 "" "Prometheus/2.33.4" 2026-03-09T15:33:28.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:28 vm05 ceph-mon[49764]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:28.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:28 vm05 ceph-mon[54361]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:28.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:28 vm09 ceph-mon[49358]: 
pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:28.812 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[50377]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:33:28] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:33:31.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:30 vm09 ceph-mon[49358]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:31.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:30 vm05 ceph-mon[49764]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:31.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:30 vm05 ceph-mon[54361]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:32.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:31 vm05 ceph-mon[49764]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:32.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:31 vm05 ceph-mon[54361]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:32.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:31 vm09 ceph-mon[49358]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:33:33.523Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:33:33.523Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:33.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate 
for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:33.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:33.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:33.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:34.549 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:34 vm09 ceph-mon[49358]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:34.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:34 vm05 ceph-mon[49764]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:34.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:34 vm05 ceph-mon[54361]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:35.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:34 vm09 systemd[1]: Stopping Ceph mgr.x for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:33:35.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 bash[68782]: Error: no container with name or ID "ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr.x" found: no such container 2026-03-09T15:33:35.343 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 podman[68789]: 2026-03-09 15:33:35.07024356 +0000 UTC m=+0.051058373 container died e6ea01192c0c20e4b685d1bb94da2e5649a7fbf630014ae4347f3c377ed3eda9 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x, vendor=Red Hat, Inc., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.openshift.expose-services=, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, RELEASE=HEAD, architecture=x86_64, GIT_BRANCH=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, version=8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.component=centos-stream-container, name=centos-stream, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-type=git, CEPH_POINT_RELEASE=-17.2.0, distribution-scope=public, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, maintainer=Guillaume Abrioux , io.k8s.display-name=CentOS Stream 8, release=754, build-date=2022-05-03T08:36:31.336870, ceph=True) 2026-03-09T15:33:35.343 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 podman[68789]: 2026-03-09 15:33:35.095914709 +0000 UTC m=+0.076729522 container remove e6ea01192c0c20e4b685d1bb94da2e5649a7fbf630014ae4347f3c377ed3eda9 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, distribution-scope=public, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_CLEAN=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., release=754, io.buildah.version=1.19.8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, RELEASE=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.k8s.display-name=CentOS Stream 8, maintainer=Guillaume Abrioux , ceph=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=centos-stream-container, CEPH_POINT_RELEASE=-17.2.0, io.openshift.expose-services=, GIT_REPO=https://github.com/ceph/ceph-container.git, name=centos-stream, vendor=Red Hat, Inc., GIT_BRANCH=HEAD, vcs-type=git, version=8, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.tags=base centos centos-stream) 2026-03-09T15:33:35.343 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 bash[68789]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x 2026-03-09T15:33:35.343 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.x.service: Main process exited, code=exited, status=143/n/a 2026-03-09T15:33:35.343 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 bash[68809]: Error: no container with name or ID "ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr.x" found: no such container 2026-03-09T15:33:35.343 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.x.service: Failed with result 'exit-code'. 2026-03-09T15:33:35.343 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 systemd[1]: Stopped Ceph mgr.x for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:33:35.343 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.x.service: Consumed 11.834s CPU time. 2026-03-09T15:33:35.343 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 systemd[1]: Starting Ceph mgr.x for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:33:35.594 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:35 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:35 vm09 ceph-mon[49358]: Upgrade: Updating mgr.x 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:35 vm09 ceph-mon[49358]: Deploying daemon mgr.x on vm09 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:35 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:35 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 podman[68895]: 2026-03-09 15:33:35.462708295 +0000 UTC m=+0.021792369 container create d5e0fdba3128bbe7771eba33c9da20b451ae017e438617a3fb5f93d9a8177924 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.41.3, 
org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2) 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 podman[68895]: 2026-03-09 15:33:35.506973848 +0000 UTC m=+0.066057922 container init d5e0fdba3128bbe7771eba33c9da20b451ae017e438617a3fb5f93d9a8177924 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x, org.label-schema.schema-version=1.0, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 podman[68895]: 2026-03-09 15:33:35.510193953 +0000 UTC m=+0.069278027 container start d5e0fdba3128bbe7771eba33c9da20b451ae017e438617a3fb5f93d9a8177924 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3) 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 bash[68895]: d5e0fdba3128bbe7771eba33c9da20b451ae017e438617a3fb5f93d9a8177924 2026-03-09T15:33:35.594 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 podman[68895]: 2026-03-09 15:33:35.454455615 +0000 UTC m=+0.013539699 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:33:35.595 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 systemd[1]: Started Ceph mgr.x for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
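At this point in the log cephadm has stopped the old 17.2.0 (quincy) mgr.x container on vm09 and started a replacement from the upgrade target image quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, i.e. the standby mgr is the first daemon moved to the new version. When following an upgrade like this one, a convenient way to see which daemons are already on the new image is to dump the orchestrator's daemon list as JSON and pull out the version and image per daemon. This is only a sketch: the field names (daemon_type, daemon_id, version, container_image_name) are assumed from the usual ceph orch ps JSON output rather than verified against this run.

    # List every daemon with the version and container image it is currently running,
    # so partially upgraded daemons (such as mgr.x here) stand out.
    ceph orch ps --format json \
      | jq -r '.[] | [.daemon_type, .daemon_id, .version, .container_image_name] | @tsv'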
2026-03-09T15:33:35.807 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:35 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:35.589Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=4 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": dial tcp 192.168.123.109:8443: connect: connection refused" 2026-03-09T15:33:35.807 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:35.807 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[49764]: Upgrade: Updating mgr.x 2026-03-09T15:33:35.807 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:35.807 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:33:35.807 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:33:35.807 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:33:35.807 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:33:35.807 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[49764]: Deploying daemon mgr.x on vm09 2026-03-09T15:33:35.807 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:33:35.807 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:33:35.807 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:33:35.807 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:33:35.808 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:35.808 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[54361]: Upgrade: Updating mgr.x 2026-03-09T15:33:35.808 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[54361]: from='mgr.24307 ' 
entity='mgr.y' 2026-03-09T15:33:35.808 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:33:35.808 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:33:35.808 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:33:35.808 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:33:35.808 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[54361]: Deploying daemon mgr.x on vm09 2026-03-09T15:33:35.808 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:33:35.808 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:33:35.808 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:33:35.808 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:35 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:33:35.967 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:35.644+0000 7f89de37c140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T15:33:35.967 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:35.698+0000 7f89de37c140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T15:33:36.246 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:36.240+0000 7f89de37c140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:33:36.486 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:36 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:33:36] "GET /metrics HTTP/1.1" 200 214399 "" "Prometheus/2.33.4" 2026-03-09T15:33:36.486 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:36 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:36.180Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=4 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": dial tcp 192.168.123.109:8443: connect: 
connection refused" 2026-03-09T15:33:36.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:36 vm09 ceph-mon[49358]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:36.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:36 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:36.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:36 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:33:36.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:36 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:33:36.815 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:36.698+0000 7f89de37c140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T15:33:36.815 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T15:33:36.815 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-09T15:33:36.815 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: from numpy import show_config as show_numpy_config 2026-03-09T15:33:36.815 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:36.804+0000 7f89de37c140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:33:36.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:36 vm05 ceph-mon[49764]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:36.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:36 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:36.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:36 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:33:36.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:36 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:33:36.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:36 vm05 ceph-mon[54361]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:36.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:36 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:36.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:36 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:33:36.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:36 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:33:37.220 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:36.846+0000 7f89de37c140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:33:37.221 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:36.942+0000 7f89de37c140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:33:37.769 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:37 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:37.625+0000 7f89de37c140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:33:38.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:37 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:37.768+0000 7f89de37c140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:33:38.063 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:37 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:37.819+0000 7f89de37c140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:33:38.063 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:37 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:37.864+0000 7f89de37c140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:33:38.063 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 
15:33:37 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:37.916+0000 7f89de37c140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:33:38.063 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:37 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:37.967+0000 7f89de37c140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:33:38.436 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:38 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:38.179+0000 7f89de37c140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:33:38.436 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:38 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:38.256+0000 7f89de37c140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:33:38.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:38 vm05 ceph-mon[49764]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:38.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:38 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:38.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:38 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:38.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:38 vm05 ceph-mon[54361]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:38.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:38 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:38.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:38 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:38.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:38 vm09 ceph-mon[49358]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:33:38.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:38 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:38.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:38 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:38.813 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:38 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:38.591+0000 7f89de37c140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T15:33:39.281 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:38 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:38.944+0000 7f89de37c140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:33:39.281 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:38 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:38.993+0000 7f89de37c140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T15:33:39.281 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:39.042+0000 7f89de37c140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:33:39.281 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:39.146+0000 7f89de37c140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 
2026-03-09T15:33:39.281 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:39.188+0000 7f89de37c140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:33:39.281 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:39.278+0000 7f89de37c140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:33:39.567 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:39.412+0000 7f89de37c140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:33:40.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:39.575+0000 7f89de37c140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T15:33:40.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:39.619+0000 7f89de37c140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:33:40.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:33:39] ENGINE Bus STARTING 2026-03-09T15:33:40.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: CherryPy Checker: 2026-03-09T15:33:40.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: The Application mounted at '' has an empty config. 2026-03-09T15:33:40.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:33:40.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:33:39] ENGINE Serving on http://:::9283 2026-03-09T15:33:40.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:33:39] ENGINE Bus STARTED 2026-03-09T15:33:40.610 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:40.304Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=6 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:40.868 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: Standby manager daemon x restarted 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: Standby manager daemon x started 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: from='mgr.? 
192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: Failing over to other MGR 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[49764]: osdmap e78: 8 total, 8 up, 8 in 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:40 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:40.625+0000 7f1cd0fb7700 -1 mgr handle_mgr_map I was active but no longer am 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:40 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ignoring --setuser ceph since I am not root 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:40 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ignoring --setgroup ceph since I am not root 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:40 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:40.794+0000 7f182a3ea000 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: Standby manager daemon x restarted 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: Standby manager daemon x started 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: from='mgr.? 
192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: Failing over to other MGR 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch 2026-03-09T15:33:40.869 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:40 vm05 ceph-mon[54361]: osdmap e78: 8 total, 8 up, 8 in 2026-03-09T15:33:40.870 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:40 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:40.823Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=6 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": dial tcp 192.168.123.105:8443: connect: connection refused" 2026-03-09T15:33:40.902 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:40 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:33:40] ENGINE Bus STOPPING 2026-03-09T15:33:40.902 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:40 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:33:40] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T15:33:40.902 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:40 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:33:40] ENGINE Bus STOPPED 2026-03-09T15:33:40.902 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 97 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:33:40.902 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: Standby manager daemon x restarted 2026-03-09T15:33:40.902 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: Standby manager daemon x started 2026-03-09T15:33:40.902 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:33:40.902 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:33:40.902 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:33:40.902 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:33:40.902 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:40.903 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' 2026-03-09T15:33:40.903 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:33:40.903 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: Upgrade: Need to upgrade myself (mgr.y) 2026-03-09T15:33:40.903 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: Failing over to other MGR 2026-03-09T15:33:40.903 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: from='mgr.24307 192.168.123.105:0/1342241433' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch 2026-03-09T15:33:40.903 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd=[{"prefix": "mgr fail", "who": "y"}]: dispatch 2026-03-09T15:33:40.903 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:40 vm09 ceph-mon[49358]: osdmap e78: 8 total, 8 up, 8 in 2026-03-09T15:33:41.155 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:40 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:40.867+0000 7f182a3ea000 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T15:33:41.225 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:40 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:33:40] ENGINE Bus STARTING 2026-03-09T15:33:41.225 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:41 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:33:41] ENGINE Serving on http://:::9283 2026-03-09T15:33:41.225 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:41 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:33:41] ENGINE Bus STARTED 2026-03-09T15:33:41.486 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:41 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:41.353+0000 7f182a3ea000 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:33:41.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mon 
metadata", "id": "a"}]: dispatch 2026-03-09T15:33:41.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:33:41.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:33:41.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:33:41.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:33:41.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:33:41.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:33:41.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:33:41.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:33:41.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:33:41.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:33:41.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:33:41.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "mgr fail", "who": "y"}]': finished 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: mgrmap e21: x(active, starting, since 0.956106s) 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: Manager daemon x is now available 2026-03-09T15:33:41.965 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: Queued rgw.foo for migration 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: Queued rgw.smpl for migration 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'smpl', 'service_name': 'rgw.smpl', 'service_type': 'rgw'} 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: Migrating certs/keys for rgw.smpl spec to cert store 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: Checking for cert/key for grafana.a 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: Deploying cephadm binary to vm09 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[49764]: Deploying cephadm binary to vm05 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:41 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 
2026-03-09T15:33:41.805+0000 7f182a3ea000 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:33:41.965 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "mgr fail", "who": "y"}]': finished 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: mgrmap e21: x(active, starting, since 0.956106s) 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: 
from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: Manager daemon x is now available 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: Queued rgw.foo for migration 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: Queued rgw.smpl for migration 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'smpl', 'service_name': 'rgw.smpl', 'service_type': 'rgw'} 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: Migrating certs/keys for rgw.smpl spec to cert store 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: Checking for cert/key for grafana.a 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: Deploying cephadm binary to vm09 
2026-03-09T15:33:41.966 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:41 vm05 ceph-mon[54361]: Deploying cephadm binary to vm05 2026-03-09T15:33:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:33:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:33:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:33:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:33:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:33:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:33:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:33:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:33:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24307 ' entity='mgr.y' cmd='[{"prefix": "mgr fail", "who": "y"}]': finished 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: mgrmap e21: x(active, starting, since 0.956106s) 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 
ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: Manager daemon x is now available 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: Queued rgw.foo for migration 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: Queued rgw.smpl for migration 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'foo', 'service_name': 'rgw.foo', 'service_type': 'rgw', 'spec': {'rgw_frontend_port': 8000, 'rgw_realm': 'r', 'rgw_zone': 'z'}} 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: No Migration is needed for rgw spec: {'placement': {'count': 2}, 'service_id': 'smpl', 'service_name': 'rgw.smpl', 'service_type': 'rgw'} 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: Migrating certs/keys for iscsi.foo spec to cert store 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: Migrating certs/keys for rgw.foo spec to cert store 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: Migrating certs/keys for rgw.smpl spec to cert store 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: Checking for cert/key for grafana.a 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/x/trash_purge_schedule"}]: dispatch 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: Deploying cephadm binary to 
vm09 2026-03-09T15:33:42.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:41 vm09 ceph-mon[49358]: Deploying cephadm binary to vm05 2026-03-09T15:33:42.235 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:41 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:41.962+0000 7f182a3ea000 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:33:42.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:42 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:42.023+0000 7f182a3ea000 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:33:42.651 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:42 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:42.236+0000 7f182a3ea000 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:33:42.652 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:42 vm05 ceph-mon[49764]: mgrmap e22: x(active, since 1.98366s) 2026-03-09T15:33:42.652 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:42 vm05 ceph-mon[49764]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:33:42.652 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:42 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:42.652 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:42 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:42.739 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:42 vm09 ceph-mon[49358]: mgrmap e22: x(active, since 1.98366s) 2026-03-09T15:33:42.739 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:42 vm09 ceph-mon[49358]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:33:42.739 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:42 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:42.739 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:42 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:42.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:42 vm05 ceph-mon[54361]: mgrmap e22: x(active, since 1.98366s) 2026-03-09T15:33:42.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:42 vm05 ceph-mon[54361]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:33:42.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:42 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:42.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:42 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:42.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:42 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:42.753Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=7 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:43.370 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:43.104+0000 7f182a3ea000 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:33:43.370 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 
2026-03-09T15:33:43.336+0000 7f182a3ea000 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:33:43.626 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:43.417+0000 7f182a3ea000 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:33:43.626 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:43.474+0000 7f182a3ea000 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:33:43.626 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:43.538+0000 7f182a3ea000 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:33:43.626 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:43.622+0000 7f182a3ea000 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:33:43.626 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:33:43.524Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": dial tcp 192.168.123.105:8443: connect: connection refused" 2026-03-09T15:33:43.626 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:43.525Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": dial tcp 192.168.123.105:8443: connect: connection refused" 2026-03-09T15:33:43.627 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:43.526Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:43.627 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:33:43.526Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs; ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:43.627 
INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:43.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": dial tcp 192.168.123.105:8443: connect: connection refused" 2026-03-09T15:33:43.627 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:43.527Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:43.931 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[49764]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:33:43.931 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[49764]: mgrmap e23: x(active, since 3s) 2026-03-09T15:33:43.931 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[49764]: [09/Mar/2026:15:33:42] ENGINE Bus STARTING 2026-03-09T15:33:43.931 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[49764]: [09/Mar/2026:15:33:42] ENGINE Serving on http://192.168.123.109:8765 2026-03-09T15:33:43.931 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[49764]: [09/Mar/2026:15:33:43] ENGINE Serving on https://192.168.123.109:7150 2026-03-09T15:33:43.932 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[49764]: [09/Mar/2026:15:33:43] ENGINE Bus STARTED 2026-03-09T15:33:43.932 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[49764]: [09/Mar/2026:15:33:43] ENGINE Client ('192.168.123.109', 40974) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T15:33:43.932 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[54361]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:33:43.932 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[54361]: mgrmap e23: x(active, since 3s) 2026-03-09T15:33:43.932 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[54361]: [09/Mar/2026:15:33:42] ENGINE Bus STARTING 2026-03-09T15:33:43.932 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[54361]: [09/Mar/2026:15:33:42] ENGINE Serving on http://192.168.123.109:8765 2026-03-09T15:33:43.932 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[54361]: [09/Mar/2026:15:33:43] ENGINE Serving on https://192.168.123.109:7150 2026-03-09T15:33:43.932 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[54361]: [09/Mar/2026:15:33:43] ENGINE Bus STARTED 2026-03-09T15:33:43.932 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:43 vm05 ceph-mon[54361]: [09/Mar/2026:15:33:43] ENGINE Client ('192.168.123.109', 40974) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T15:33:44.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:43 vm09 ceph-mon[49358]: pgmap v4: 
161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:33:44.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:43 vm09 ceph-mon[49358]: mgrmap e23: x(active, since 3s) 2026-03-09T15:33:44.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:43 vm09 ceph-mon[49358]: [09/Mar/2026:15:33:42] ENGINE Bus STARTING 2026-03-09T15:33:44.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:43 vm09 ceph-mon[49358]: [09/Mar/2026:15:33:42] ENGINE Serving on http://192.168.123.109:8765 2026-03-09T15:33:44.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:43 vm09 ceph-mon[49358]: [09/Mar/2026:15:33:43] ENGINE Serving on https://192.168.123.109:7150 2026-03-09T15:33:44.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:43 vm09 ceph-mon[49358]: [09/Mar/2026:15:33:43] ENGINE Bus STARTED 2026-03-09T15:33:44.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:43 vm09 ceph-mon[49358]: [09/Mar/2026:15:33:43] ENGINE Client ('192.168.123.109', 40974) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T15:33:44.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:44.067+0000 7f182a3ea000 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:33:44.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:44.157+0000 7f182a3ea000 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:33:44.839 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:44 vm09 ceph-mon[49358]: mgrmap e24: x(active, since 4s) 2026-03-09T15:33:44.840 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:44 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.840 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:44 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.840 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:44 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.840 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:44 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.840 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:44 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.840 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:44 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.840 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:44 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[49764]: mgrmap e24: x(active, since 4s) 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[49764]: 
from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[54361]: mgrmap e24: x(active, since 4s) 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:44.983 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:44 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:45.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:44.981+0000 7f182a3ea000 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:33:45.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:45.064+0000 7f182a3ea000 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T15:33:45.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:45.151+0000 7f182a3ea000 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:33:45.626 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:45.308+0000 7f182a3ea000 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T15:33:45.626 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:45.382+0000 7f182a3ea000 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:33:45.626 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:45.516+0000 7f182a3ea000 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:33:45.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:45 vm05 ceph-mon[49764]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:33:45.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:45 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:45.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:45 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:45.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:45 vm05 
ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:33:45.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:45 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:33:45.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:45.625+0000 7f182a3ea000 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:33:45.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:45 vm05 ceph-mon[54361]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:33:45.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:45 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:45.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:45 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:45.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:45 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:33:45.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:45 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:33:46.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:45 vm09 ceph-mon[49358]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:33:46.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:45 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:46.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:45 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:46.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:45 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:33:46.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:45 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:33:46.394 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:46.014+0000 7f182a3ea000 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T15:33:46.402 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:46.090+0000 7f182a3ea000 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:33:46.402 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: [09/Mar/2026:15:33:46] ENGINE Bus STARTING 2026-03-09T15:33:46.402 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: CherryPy Checker: 2026-03-09T15:33:46.402 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: The Application mounted at '' has an empty config. 
2026-03-09T15:33:46.402 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: 2026-03-09T15:33:46.402 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: [09/Mar/2026:15:33:46] ENGINE Serving on http://:::9283 2026-03-09T15:33:46.402 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: [09/Mar/2026:15:33:46] ENGINE Bus STARTED 2026-03-09T15:33:46.402 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:33:46] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:33:46.736 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:46.509Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=4 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: Standby manager daemon y started 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: from='mgr.? 
192.168.123.105:0/3375268351' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.105:0/3375268351' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.105:0/3375268351' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.105:0/3375268351' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.client.admin.keyring 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.client.admin.keyring 2026-03-09T15:33:47.206 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 
15:33:46 vm05 ceph-mon[54361]: Standby manager daemon y started 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.105:0/3375268351' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.105:0/3375268351' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.105:0/3375268351' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.105:0/3375268351' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.client.admin.keyring 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.client.admin.keyring 2026-03-09T15:33:47.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:46 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:47.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:47.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:47.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 
15:33:46 vm09 ceph-mon[49358]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: Standby manager daemon y started 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.105:0/3375268351' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.105:0/3375268351' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.105:0/3375268351' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.105:0/3375268351' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.client.admin.keyring 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.client.admin.keyring 2026-03-09T15:33:47.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:46 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:47.486 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:47 vm05 systemd[1]: Stopping Ceph node-exporter.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:33:47.806 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:47 vm05 podman[81254]: 2026-03-09 15:33:47.489094715 +0000 UTC m=+0.023261086 container died 9e0bcae9a93c5cdd9b8309a9a3f2f027509f56e92e5f40e3ec1f07ee74d19a8f (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T15:33:47.806 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:47 vm05 podman[81254]: 2026-03-09 15:33:47.513658371 +0000 UTC m=+0.047824742 container remove 9e0bcae9a93c5cdd9b8309a9a3f2f027509f56e92e5f40e3ec1f07ee74d19a8f (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T15:33:47.806 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:47 vm05 bash[81254]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a 2026-03-09T15:33:47.806 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:47 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.a.service: Main process exited, code=exited, status=143/n/a 2026-03-09T15:33:47.806 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:47 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.a.service: Failed with result 'exit-code'. 2026-03-09T15:33:47.806 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:47 vm05 systemd[1]: Stopped Ceph node-exporter.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:33:47.806 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:47 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.a.service: Consumed 1.229s CPU time. 2026-03-09T15:33:47.941 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[49764]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[49764]: mgrmap e25: x(active, since 7s), standbys: y 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:47 vm05 
ceph-mon[49764]: Reconfiguring node-exporter.a (dependencies changed)... 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[49764]: Deploying daemon node-exporter.a on vm05 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[54361]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[54361]: mgrmap e25: x(active, since 7s), standbys: y 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[54361]: Reconfiguring node-exporter.a (dependencies changed)... 2026-03-09T15:33:48.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:47 vm05 ceph-mon[54361]: Deploying daemon node-exporter.a on vm05 2026-03-09T15:33:48.237 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:47 vm05 systemd[1]: Starting Ceph node-exporter.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:33:48.237 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:47 vm05 bash[81387]: Trying to pull quay.io/prometheus/node-exporter:v1.7.0... 
2026-03-09T15:33:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:47 vm09 ceph-mon[49358]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:33:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:47 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:47 vm09 ceph-mon[49358]: mgrmap e25: x(active, since 7s), standbys: y 2026-03-09T15:33:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:47 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:47 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:33:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:47 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:47 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:47 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:47 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:47 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:47 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:47 vm09 ceph-mon[49358]: Reconfiguring node-exporter.a (dependencies changed)... 
2026-03-09T15:33:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:47 vm09 ceph-mon[49358]: Deploying daemon node-exporter.a on vm05
2026-03-09T15:33:48.382 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-09T15:33:48.382 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (4m) 4s ago 4m 24.5M - 0.23.0 ba2b418f427c f0e454ec34d8
2026-03-09T15:33:48.382 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (4m) 4s ago 4m 49.7M - dad864ee21e9 3727d7279dc9
2026-03-09T15:33:48.382 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 running (4m) 4s ago 4m 48.7M - 3.5 e1d6a67b021e ed0541340202
2026-03-09T15:33:48.382 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443,9283 running (12s) 4s ago 6m 531M - 19.2.3-678-ge911bdeb 654f31e6858e d5e0fdba3128
2026-03-09T15:33:48.382 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:9283 running (7m) 4s ago 7m 184M - 17.2.0 e1d6a67b021e 25a6783f54e5
2026-03-09T15:33:48.382 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (7m) 4s ago 7m 54.5M 2048M 17.2.0 e1d6a67b021e b31ac3c66976
2026-03-09T15:33:48.382 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (6m) 4s ago 6m 44.1M 2048M 17.2.0 e1d6a67b021e 0fedbaac50c3
2026-03-09T15:33:48.382 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (6m) 4s ago 6m 41.8M 2048M 17.2.0 e1d6a67b021e 43bd063ebab2
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (5m) 4s ago 5m 21.1M - 1.3.1 1dbe0e931976 9e0bcae9a93c
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (5m) 4s ago 5m 18.0M - 1.3.1 1dbe0e931976 b80da7ef9167
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (6m) 4s ago 6m 53.6M 4096M 17.2.0 e1d6a67b021e b143b061d0dd
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (6m) 4s ago 6m 51.7M 4096M 17.2.0 e1d6a67b021e 2277528e9f90
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (6m) 4s ago 6m 48.4M 4096M 17.2.0 e1d6a67b021e 21b53f2cd34c
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (5m) 4s ago 5m 54.4M 4096M 17.2.0 e1d6a67b021e b4398847e195
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (5m) 4s ago 5m 51.4M 4096M 17.2.0 e1d6a67b021e 00685022776e
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (5m) 4s ago 5m 49.2M 4096M 17.2.0 e1d6a67b021e fbdec571623e
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (5m) 4s ago 5m 49.5M 4096M 17.2.0 e1d6a67b021e ad01856a3458
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (5m) 4s ago 5m 50.9M 4096M 17.2.0 e1d6a67b021e 3cc4727e5f07
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (4m) 4s ago 5m 58.9M - 2.33.4 514e6a882f6e d95368d63cb2
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (4m) 4s ago 4m 90.5M - 17.2.0 e1d6a67b021e 1d93c894d675
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (4m) 4s ago 4m 90.4M - 17.2.0 e1d6a67b021e 5c4755297ad0
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (4m) 4s ago 4m 91.9M - 17.2.0 e1d6a67b021e fe5812c6c74c
2026-03-09T15:33:48.383 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (4m) 4s ago 4m 89.1M - 17.2.0 e1d6a67b021e 7a967179b651
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:{
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:    "mon": {
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:    },
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:    "mgr": {
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 1,
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:        "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:    },
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:    "osd": {
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:    },
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:    "mds": {},
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:    "rgw": {
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:    },
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:    "overall": {
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:        "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 16,
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:        "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:    }
2026-03-09T15:33:48.641 INFO:teuthology.orchestra.run.vm05.stdout:}
2026-03-09T15:33:48.642 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:48 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:48.347Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=5 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs"
2026-03-09T15:33:48.812 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:48 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:33:48] "GET /metrics HTTP/1.1" 200 34770 "" "Prometheus/2.33.4"
2026-03-09T15:33:48.868 INFO:teuthology.orchestra.run.vm05.stdout:{
2026-03-09T15:33:48.868 INFO:teuthology.orchestra.run.vm05.stdout:    "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-09T15:33:48.868 INFO:teuthology.orchestra.run.vm05.stdout:    "in_progress": true,
2026-03-09T15:33:48.868 INFO:teuthology.orchestra.run.vm05.stdout:    "which": "Upgrading all daemon types on all hosts",
2026-03-09T15:33:48.868 INFO:teuthology.orchestra.run.vm05.stdout:    "services_complete": [],
2026-03-09T15:33:48.868 INFO:teuthology.orchestra.run.vm05.stdout:    "progress": "1/23 daemons upgraded",
2026-03-09T15:33:48.869 INFO:teuthology.orchestra.run.vm05.stdout:    "message": "",
2026-03-09T15:33:48.869 INFO:teuthology.orchestra.run.vm05.stdout:    "is_paused": false
2026-03-09T15:33:48.869 INFO:teuthology.orchestra.run.vm05.stdout:}
2026-03-09T15:33:49.131 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_OK
2026-03-09T15:33:49.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:48 vm05 ceph-mon[49764]: from='client.24913 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:33:49.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:48 vm05 ceph-mon[49764]: from='client.24919 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:33:49.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:48 vm05 ceph-mon[49764]: from='client.15048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:33:49.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:48 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2795738213' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T15:33:49.236 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:49 vm05 bash[81387]: Getting image source signatures
2026-03-09T15:33:49.236 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:49 vm05 bash[81387]: Copying blob sha256:324153f2810a9927fcce320af9e4e291e0b6e805cbdd1f338386c756b9defa24
2026-03-09T15:33:49.236 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:49 vm05 bash[81387]: Copying blob sha256:2abcce694348cd2c949c0e98a7400ebdfd8341021bcf6b541bc72033ce982510
2026-03-09T15:33:49.236 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:49 vm05 bash[81387]: Copying blob sha256:455fd88e5221bc1e278ef2d059cd70e4df99a24e5af050ede621534276f6cf9a
2026-03-09T15:33:49.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:48 vm05 ceph-mon[54361]: from='client.24913 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:33:49.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:48 vm05 ceph-mon[54361]: from='client.24919 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:33:49.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:48 vm05 ceph-mon[54361]: from='client.15048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:33:49.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:48 vm05 ceph-mon[54361]: from='client.?
192.168.123.105:0/2795738213' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:33:49.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:48 vm09 ceph-mon[49358]: from='client.24913 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:49.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:48 vm09 ceph-mon[49358]: from='client.24919 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:49.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:48 vm09 ceph-mon[49358]: from='client.15048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:49.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:48 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2795738213' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:33:50.332 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-mon[49764]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T15:33:50.332 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-mon[49764]: from='client.15060 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:50.332 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/790584795' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:33:50.332 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:50 vm05 ceph-mon[54361]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T15:33:50.332 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:50 vm05 ceph-mon[54361]: from='client.15060 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:50.332 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:50 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/790584795' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:33:50.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:50 vm09 ceph-mon[49358]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T15:33:50.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:50 vm09 ceph-mon[49358]: from='client.15060 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:33:50.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:50 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/790584795' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 bash[81387]: Copying config sha256:72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 bash[81387]: Writing manifest to image destination 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 podman[81387]: 2026-03-09 15:33:50.340482423 +0000 UTC m=+2.430082241 image pull 72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e quay.io/prometheus/node-exporter:v1.7.0 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 podman[81387]: 2026-03-09 15:33:50.347469109 +0000 UTC m=+2.437068917 container create e730a028339ffc53981bc8c8f675448deb9b6024a2becdb9520129889e99f1bf (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 podman[81387]: 2026-03-09 15:33:50.382027968 +0000 UTC m=+2.471627797 container init e730a028339ffc53981bc8c8f675448deb9b6024a2becdb9520129889e99f1bf (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 podman[81387]: 2026-03-09 15:33:50.386627267 +0000 UTC m=+2.476227085 container start e730a028339ffc53981bc8c8f675448deb9b6024a2becdb9520129889e99f1bf (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 bash[81387]: e730a028339ffc53981bc8c8f675448deb9b6024a2becdb9520129889e99f1bf 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.389Z caller=node_exporter.go:192 level=info msg="Starting node_exporter" version="(version=1.7.0, branch=HEAD, revision=7333465abf9efba81876303bb57e6fadb946041b)" 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.390Z caller=node_exporter.go:193 level=info msg="Build context" build_context="(go=go1.21.4, platform=linux/amd64, user=root@35918982f6d8, date=20231112-23:53:35, tags=netgo osusergo static_build)" 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$ 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=diskstats_linux.go:265 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data 2026-03-09T15:33:50.737 
INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/) 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:110 level=info msg="Enabled collectors" 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=arp 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=bcache 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=bonding 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=btrfs 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=conntrack 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=cpu 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=cpufreq 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=diskstats 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=dmi 2026-03-09T15:33:50.737 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 
level=info collector=edac 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=entropy 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=fibrechannel 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=filefd 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=filesystem 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=hwmon 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=infiniband 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=ipvs 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=loadavg 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=mdadm 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=meminfo 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=netclass 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.391Z caller=node_exporter.go:117 level=info collector=netdev 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=netstat 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=nfs 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=nfsd 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=nvme 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=os 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=powersupplyclass 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=pressure 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=rapl 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=schedstat 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=selinux 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=sockstat 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=softnet 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=stat 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=tapestats 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=textfile 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=thermal_zone 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z 
caller=node_exporter.go:117 level=info collector=time 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=udp_queues 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=uname 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=vmstat 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=xfs 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.392Z caller=node_exporter.go:117 level=info collector=zfs 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.393Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a[81562]: ts=2026-03-09T15:33:50.393Z caller=tls_config.go:277 level=info msg="TLS is disabled." http2=false address=[::]:9100 2026-03-09T15:33:50.738 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:33:50 vm05 systemd[1]: Started Ceph node-exporter.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:33:51.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:51 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:51.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:51 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:51.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:51 vm05 ceph-mon[49764]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-09T15:33:51.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:51 vm05 ceph-mon[49764]: Deploying daemon alertmanager.a on vm05 2026-03-09T15:33:51.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:51 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:51.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:51 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:51.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:51 vm05 ceph-mon[54361]: Reconfiguring alertmanager.a (dependencies changed)... 2026-03-09T15:33:51.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:51 vm05 ceph-mon[54361]: Deploying daemon alertmanager.a on vm05 2026-03-09T15:33:51.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:51 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:51.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:51 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:51.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:51 vm09 ceph-mon[49358]: Reconfiguring alertmanager.a (dependencies changed)... 
2026-03-09T15:33:51.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:51 vm09 ceph-mon[49358]: Deploying daemon alertmanager.a on vm05 2026-03-09T15:33:52.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:52 vm05 ceph-mon[49764]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T15:33:52.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:52 vm05 ceph-mon[54361]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T15:33:52.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:52 vm09 ceph-mon[49358]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T15:33:53.797 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:33:53.525Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:53.798 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=error ts=2026-03-09T15:33:53.527Z caller=dispatch.go:354 component=dispatcher msg="Notify for alerts failed" num_alerts=4 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs; ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:53.798 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:53.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:53.798 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:53.529Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:53.798 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:53.530Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard 
integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.109:8443/api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.109 because it doesn't contain any IP SANs" 2026-03-09T15:33:53.798 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=warn ts=2026-03-09T15:33:53.534Z caller=notify.go:724 component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://192.168.123.105:8443//api/prometheus_receiver\": x509: cannot validate certificate for 192.168.123.105 because it doesn't contain any IP SANs" 2026-03-09T15:33:54.205 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:53 vm05 systemd[1]: Stopping Ceph alertmanager.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:33:54.205 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[76619]: level=info ts=2026-03-09T15:33:54.005Z caller=main.go:557 msg="Received SIGTERM, exiting gracefully..." 2026-03-09T15:33:54.205 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 podman[81825]: 2026-03-09 15:33:54.017978752 +0000 UTC m=+0.030540879 container died f0e454ec34d8b89f27d1b037be47d532d42b20ffd070a24b54dfb357e9c322fc (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T15:33:54.205 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 podman[81825]: 2026-03-09 15:33:54.036499475 +0000 UTC m=+0.049061602 container remove f0e454ec34d8b89f27d1b037be47d532d42b20ffd070a24b54dfb357e9c322fc (image=quay.io/prometheus/alertmanager:v0.23.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T15:33:54.205 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 podman[81825]: 2026-03-09 15:33:54.037979248 +0000 UTC m=+0.050541386 volume remove ba4e936013d823314895a7a3c286425426345366db7de94e659df5ed47a8a7ed 2026-03-09T15:33:54.205 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 bash[81825]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a 2026-03-09T15:33:54.205 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@alertmanager.a.service: Deactivated successfully. 2026-03-09T15:33:54.205 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 systemd[1]: Stopped Ceph alertmanager.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:33:54.205 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@alertmanager.a.service: Consumed 1.033s CPU time. 2026-03-09T15:33:54.734 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:54 vm05 ceph-mon[54361]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 systemd[1]: Starting Ceph alertmanager.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 podman[81934]: 2026-03-09 15:33:54.438099788 +0000 UTC m=+0.017675556 volume create 5502f6f6c0f3c05e563740087d78aaf95bd4c119e4feff12ddfbcd062d5445c7 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 podman[81934]: 2026-03-09 15:33:54.440898234 +0000 UTC m=+0.020474002 container create 93224b6bb99a8ed36d55312ab96bebfc743219a6323303487fcd1a0eaf60b67b (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 podman[81934]: 2026-03-09 15:33:54.469616307 +0000 UTC m=+0.049192086 container init 93224b6bb99a8ed36d55312ab96bebfc743219a6323303487fcd1a0eaf60b67b (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 podman[81934]: 2026-03-09 15:33:54.47743648 +0000 UTC m=+0.057012248 container start 93224b6bb99a8ed36d55312ab96bebfc743219a6323303487fcd1a0eaf60b67b (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 bash[81934]: 93224b6bb99a8ed36d55312ab96bebfc743219a6323303487fcd1a0eaf60b67b 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 podman[81934]: 2026-03-09 15:33:54.431517758 +0000 UTC m=+0.011093536 image pull c8568f914cd25b2062c44e9f79f9c18da6e3b85fe0c47a12a2191c61426c2b19 quay.io/prometheus/alertmanager:v0.25.0 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 systemd[1]: Started Ceph alertmanager.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:33:54.500Z caller=main.go:240 level=info msg="Starting Alertmanager" version="(version=0.25.0, branch=HEAD, revision=258fab7cdd551f2cf251ed0348f0ad7289aee789)" 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:33:54.500Z caller=main.go:241 level=info build_context="(go=go1.19.4, user=root@abe866dd5717, date=20221222-14:51:36)" 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:33:54.501Z caller=cluster.go:185 level=info component=cluster msg="setting advertise address explicitly" addr=192.168.123.105 port=9094 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:33:54.503Z caller=cluster.go:681 level=info component=cluster msg="Waiting for gossip to settle..." 
interval=2s 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:33:54.542Z caller=coordinator.go:113 level=info component=configuration msg="Loading configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:33:54.543Z caller=coordinator.go:126 level=info component=configuration msg="Completed loading of configuration file" file=/etc/alertmanager/alertmanager.yml 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:33:54.547Z caller=tls_config.go:232 level=info msg="Listening on" address=[::]:9093 2026-03-09T15:33:54.734 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:54 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:33:54.547Z caller=tls_config.go:235 level=info msg="TLS is disabled." http2=false address=[::]:9093 2026-03-09T15:33:54.734 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:54 vm05 ceph-mon[49764]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T15:33:54.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:54 vm09 ceph-mon[49358]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[49764]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[49764]: Reconfiguring iscsi.foo.vm05.rfsich (dependencies changed)... 
2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[49764]: Reconfiguring daemon iscsi.foo.vm05.rfsich on vm05 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[54361]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[54361]: Reconfiguring iscsi.foo.vm05.rfsich (dependencies changed)... 
2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:33:55.728 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:55 vm05 ceph-mon[54361]: Reconfiguring daemon iscsi.foo.vm05.rfsich on vm05 2026-03-09T15:33:55.780 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:55 vm09 ceph-mon[49358]: from='client.14739 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:33:55.780 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:55 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:55.780 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:55 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:55.780 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:55 vm09 ceph-mon[49358]: Reconfiguring iscsi.foo.vm05.rfsich (dependencies changed)... 2026-03-09T15:33:55.780 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:55 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:33:55.780 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:55 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:33:55.780 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:55 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:33:55.780 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:55 vm09 ceph-mon[49358]: Reconfiguring daemon iscsi.foo.vm05.rfsich on vm05 2026-03-09T15:33:56.063 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:55 vm09 systemd[1]: Stopping Ceph node-exporter.b for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:33:56.383 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:56 vm09 podman[71531]: 2026-03-09 15:33:56.104446835 +0000 UTC m=+0.035739873 container died b80da7ef91675d378780d3d39f6352eaa1338048014243676b24b849ac270863 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T15:33:56.383 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:56 vm09 podman[71531]: 2026-03-09 15:33:56.128831444 +0000 UTC m=+0.060124482 container remove b80da7ef91675d378780d3d39f6352eaa1338048014243676b24b849ac270863 (image=quay.io/prometheus/node-exporter:v1.3.1, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T15:33:56.383 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:56 vm09 bash[71531]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b 2026-03-09T15:33:56.383 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:56 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.b.service: Main process exited, code=exited, status=143/n/a 2026-03-09T15:33:56.383 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:56 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.b.service: Failed with result 'exit-code'. 2026-03-09T15:33:56.383 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:56 vm09 systemd[1]: Stopped Ceph node-exporter.b for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:33:56.383 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:56 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.b.service: Consumed 1.301s CPU time. 2026-03-09T15:33:56.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:56 vm05 ceph-mon[49764]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T15:33:56.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:56 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:56.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:56 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:56.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:56 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:33:56.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:56 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/1981186130' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T15:33:56.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:56 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/3779028540' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2557587341"}]: dispatch 2026-03-09T15:33:56.736 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:33:56 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:33:56.504Z caller=cluster.go:706 level=info component=cluster msg="gossip not settled" polls=0 before=0 now=1 elapsed=2.00104794s 2026-03-09T15:33:56.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:33:56 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[50028]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:33:56] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.33.4" 2026-03-09T15:33:56.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:56 vm05 ceph-mon[54361]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T15:33:56.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:56 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:56.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:56 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:56.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:56 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:33:56.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:56 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/1981186130' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T15:33:56.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:56 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3779028540' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2557587341"}]: dispatch 2026-03-09T15:33:56.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:56 vm09 ceph-mon[49358]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 0 B/s wr, 6 op/s 2026-03-09T15:33:56.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:56 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:56.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:56 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:56.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:56 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:33:56.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:56 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/1981186130' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T15:33:56.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:56 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3779028540' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2557587341"}]: dispatch 2026-03-09T15:33:56.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:56 vm09 systemd[1]: Starting Ceph node-exporter.b for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:33:56.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:56 vm09 bash[71643]: Trying to pull quay.io/prometheus/node-exporter:v1.7.0... 
2026-03-09T15:33:57.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:57 vm05 ceph-mon[49764]: Reconfiguring node-exporter.b (dependencies changed)... 2026-03-09T15:33:57.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:57 vm05 ceph-mon[49764]: Deploying daemon node-exporter.b on vm09 2026-03-09T15:33:57.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:57 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3779028540' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2557587341"}]': finished 2026-03-09T15:33:57.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:57 vm05 ceph-mon[49764]: osdmap e79: 8 total, 8 up, 8 in 2026-03-09T15:33:57.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:57 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3972067164' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3307379082"}]: dispatch 2026-03-09T15:33:57.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:57 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3307379082"}]: dispatch 2026-03-09T15:33:57.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:57 vm05 ceph-mon[54361]: Reconfiguring node-exporter.b (dependencies changed)... 2026-03-09T15:33:57.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:57 vm05 ceph-mon[54361]: Deploying daemon node-exporter.b on vm09 2026-03-09T15:33:57.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:57 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3779028540' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2557587341"}]': finished 2026-03-09T15:33:57.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:57 vm05 ceph-mon[54361]: osdmap e79: 8 total, 8 up, 8 in 2026-03-09T15:33:57.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:57 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3972067164' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3307379082"}]: dispatch 2026-03-09T15:33:57.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:57 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3307379082"}]: dispatch 2026-03-09T15:33:58.019 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:57 vm09 ceph-mon[49358]: Reconfiguring node-exporter.b (dependencies changed)... 2026-03-09T15:33:58.019 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:57 vm09 ceph-mon[49358]: Deploying daemon node-exporter.b on vm09 2026-03-09T15:33:58.019 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:57 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3779028540' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2557587341"}]': finished 2026-03-09T15:33:58.019 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:57 vm09 ceph-mon[49358]: osdmap e79: 8 total, 8 up, 8 in 2026-03-09T15:33:58.019 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:57 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/3972067164' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3307379082"}]: dispatch 2026-03-09T15:33:58.019 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:57 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3307379082"}]: dispatch 2026-03-09T15:33:58.312 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 bash[71643]: Getting image source signatures 2026-03-09T15:33:58.312 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 bash[71643]: Copying blob sha256:2abcce694348cd2c949c0e98a7400ebdfd8341021bcf6b541bc72033ce982510 2026-03-09T15:33:58.312 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 bash[71643]: Copying blob sha256:324153f2810a9927fcce320af9e4e291e0b6e805cbdd1f338386c756b9defa24 2026-03-09T15:33:58.312 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 bash[71643]: Copying blob sha256:455fd88e5221bc1e278ef2d059cd70e4df99a24e5af050ede621534276f6cf9a 2026-03-09T15:33:58.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-mon[49358]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T15:33:58.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3307379082"}]': finished 2026-03-09T15:33:58.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-mon[49358]: osdmap e80: 8 total, 8 up, 8 in 2026-03-09T15:33:58.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3213024347' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3296056962"}]: dispatch 2026-03-09T15:33:58.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-mon[49358]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3296056962"}]: dispatch 2026-03-09T15:33:58.813 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:33:58] "GET /metrics HTTP/1.1" 200 37766 "" "Prometheus/2.33.4" 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 bash[71643]: Copying config sha256:72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 bash[71643]: Writing manifest to image destination 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 podman[71643]: 2026-03-09 15:33:58.752387825 +0000 UTC m=+2.110814693 container create a360ac0679f496154a5c2e8f12313ae81e0f281d83d357e933dee670fc82376d (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 podman[71643]: 2026-03-09 15:33:58.743611945 +0000 UTC m=+2.102038822 image pull 72c9c208898624938c9e4183d6686ea4a5fd3f912bc29bc3f00147924c521a3e quay.io/prometheus/node-exporter:v1.7.0 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 podman[71643]: 2026-03-09 15:33:58.778051219 +0000 UTC m=+2.136478088 container init a360ac0679f496154a5c2e8f12313ae81e0f281d83d357e933dee670fc82376d (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 podman[71643]: 2026-03-09 15:33:58.781252212 +0000 UTC m=+2.139679080 container start a360ac0679f496154a5c2e8f12313ae81e0f281d83d357e933dee670fc82376d (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 bash[71643]: a360ac0679f496154a5c2e8f12313ae81e0f281d83d357e933dee670fc82376d 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.784Z caller=node_exporter.go:192 level=info msg="Starting node_exporter" version="(version=1.7.0, branch=HEAD, revision=7333465abf9efba81876303bb57e6fadb946041b)" 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.785Z caller=node_exporter.go:193 level=info msg="Build context" build_context="(go=go1.21.4, platform=linux/amd64, user=root@35918982f6d8, date=20231112-23:53:35, tags=netgo osusergo static_build)" 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.786Z caller=diskstats_common.go:111 level=info collector=diskstats msg="Parsed flag --collector.diskstats.device-exclude" flag=^(ram|loop|fd|(h|s|v|xv)d[a-z]|nvme\d+n\d+p)\d+$ 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 systemd[1]: 
Started Ceph node-exporter.b for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.790Z caller=diskstats_linux.go:265 level=error collector=diskstats msg="Failed to open directory, disabling udev device properties" path=/run/udev/data 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.790Z caller=filesystem_common.go:111 level=info collector=filesystem msg="Parsed flag --collector.filesystem.mount-points-exclude" flag=^/(dev|proc|run/credentials/.+|sys|var/lib/docker/.+|var/lib/containers/storage/.+)($|/) 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.790Z caller=filesystem_common.go:113 level=info collector=filesystem msg="Parsed flag --collector.filesystem.fs-types-exclude" flag=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:110 level=info msg="Enabled collectors" 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=arp 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=bcache 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=bonding 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=btrfs 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=conntrack 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=cpu 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=cpufreq 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=diskstats 2026-03-09T15:33:58.813 
INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=dmi 2026-03-09T15:33:58.813 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=edac 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=entropy 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=fibrechannel 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=filefd 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=filesystem 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=hwmon 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.791Z caller=node_exporter.go:117 level=info collector=infiniband 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=ipvs 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=loadavg 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=mdadm 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=meminfo 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=netclass 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=netdev 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=netstat 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=nfs 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=nfsd 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=nvme 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=os 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=powersupplyclass 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=pressure 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=rapl 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=schedstat 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=selinux 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=sockstat 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=softnet 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=stat 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=tapestats 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 
level=info collector=textfile 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=thermal_zone 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.792Z caller=node_exporter.go:117 level=info collector=time 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.793Z caller=node_exporter.go:117 level=info collector=udp_queues 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.793Z caller=node_exporter.go:117 level=info collector=uname 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.793Z caller=node_exporter.go:117 level=info collector=vmstat 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.793Z caller=node_exporter.go:117 level=info collector=xfs 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.793Z caller=node_exporter.go:117 level=info collector=zfs 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.793Z caller=tls_config.go:274 level=info msg="Listening on" address=[::]:9100 2026-03-09T15:33:58.814 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:33:58 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b[71699]: ts=2026-03-09T15:33:58.793Z caller=tls_config.go:277 level=info msg="TLS is disabled." http2=false address=[::]:9100 2026-03-09T15:33:58.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:58 vm05 ceph-mon[49764]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T15:33:58.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:58 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3307379082"}]': finished 2026-03-09T15:33:58.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:58 vm05 ceph-mon[49764]: osdmap e80: 8 total, 8 up, 8 in 2026-03-09T15:33:58.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:58 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3213024347' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3296056962"}]: dispatch 2026-03-09T15:33:58.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:58 vm05 ceph-mon[49764]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3296056962"}]: dispatch 2026-03-09T15:33:58.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:58 vm05 ceph-mon[54361]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 0 B/s wr, 8 op/s 2026-03-09T15:33:58.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:58 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3307379082"}]': finished 2026-03-09T15:33:58.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:58 vm05 ceph-mon[54361]: osdmap e80: 8 total, 8 up, 8 in 2026-03-09T15:33:58.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:58 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3213024347' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3296056962"}]: dispatch 2026-03-09T15:33:58.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:58 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3296056962"}]: dispatch 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3296056962"}]': finished 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[49764]: osdmap e81: 8 total, 8 up, 8 in 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[49764]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 1 op/s 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/899866139' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2151097925"}]: dispatch 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2151097925"}]: dispatch 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[49764]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[49764]: Deploying daemon prometheus.a on vm09 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[54361]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3296056962"}]': finished 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[54361]: osdmap e81: 8 total, 8 up, 8 in 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[54361]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 1 op/s 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/899866139' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2151097925"}]: dispatch 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2151097925"}]: dispatch 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[54361]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T15:33:59.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:33:59 vm05 ceph-mon[54361]: Deploying daemon prometheus.a on vm09 2026-03-09T15:34:00.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:59 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3296056962"}]': finished 2026-03-09T15:34:00.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:59 vm09 ceph-mon[49358]: osdmap e81: 8 total, 8 up, 8 in 2026-03-09T15:34:00.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:59 vm09 ceph-mon[49358]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 1 op/s 2026-03-09T15:34:00.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:59 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/899866139' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2151097925"}]: dispatch 2026-03-09T15:34:00.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:59 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2151097925"}]: dispatch 2026-03-09T15:34:00.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:59 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:00.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:59 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:00.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:59 vm09 ceph-mon[49358]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T15:34:00.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:33:59 vm09 ceph-mon[49358]: Deploying daemon prometheus.a on vm09 2026-03-09T15:34:00.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:00 vm05 ceph-mon[49764]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2151097925"}]': finished 2026-03-09T15:34:00.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:00 vm05 ceph-mon[49764]: osdmap e82: 8 total, 8 up, 8 in 2026-03-09T15:34:00.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:00 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2242253482' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3348665640"}]: dispatch 2026-03-09T15:34:00.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:00 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3348665640"}]: dispatch 2026-03-09T15:34:00.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:00 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2151097925"}]': finished 2026-03-09T15:34:00.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:00 vm05 ceph-mon[54361]: osdmap e82: 8 total, 8 up, 8 in 2026-03-09T15:34:00.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:00 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2242253482' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3348665640"}]: dispatch 2026-03-09T15:34:00.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:00 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3348665640"}]: dispatch 2026-03-09T15:34:01.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:00 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2151097925"}]': finished 2026-03-09T15:34:01.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:00 vm09 ceph-mon[49358]: osdmap e82: 8 total, 8 up, 8 in 2026-03-09T15:34:01.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:00 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2242253482' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3348665640"}]: dispatch 2026-03-09T15:34:01.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:00 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3348665640"}]: dispatch 2026-03-09T15:34:01.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:01 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3348665640"}]': finished 2026-03-09T15:34:01.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:01 vm05 ceph-mon[49764]: osdmap e83: 8 total, 8 up, 8 in 2026-03-09T15:34:01.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:01 vm05 ceph-mon[49764]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:01.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:01 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/3043669182' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/3348665640"}]: dispatch 2026-03-09T15:34:01.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:01 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3348665640"}]': finished 2026-03-09T15:34:01.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:01 vm05 ceph-mon[54361]: osdmap e83: 8 total, 8 up, 8 in 2026-03-09T15:34:01.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:01 vm05 ceph-mon[54361]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:01.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:01 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3043669182' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/3348665640"}]: dispatch 2026-03-09T15:34:02.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:01 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/3348665640"}]': finished 2026-03-09T15:34:02.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:01 vm09 ceph-mon[49358]: osdmap e83: 8 total, 8 up, 8 in 2026-03-09T15:34:02.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:01 vm09 ceph-mon[49358]: pgmap v18: 161 pgs: 161 active+clean; 457 KiB data, 98 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:02.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:01 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3043669182' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/3348665640"}]: dispatch 2026-03-09T15:34:02.752 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:02 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:34:02.313Z caller=manager.go:609 level=warn component="rule manager" group=pools msg="Evaluating rule failed" rule="alert: CephPoolGrowthWarning\nexpr: (predict_linear(ceph_pool_percent_used[2d], 3600 * 24 * 5) * on(pool_id) group_right()\n ceph_pool_metadata) >= 95\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.9.2\n severity: warning\n type: ceph_default\nannotations:\n description: |\n Pool '{{ $labels.name }}' will be full in less than 5 days assuming the average fill-up rate of the past 48 hours.\n summary: Pool growth rate may soon exceed it's capacity\n" err="found duplicate series for the match group {pool_id=\"1\"} on the left hand-side of the operation: [{instance=\"192.168.123.109:9283\", job=\"ceph\", pool_id=\"1\"}, {instance=\"192.168.123.105:9283\", job=\"ceph\", pool_id=\"1\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:34:02.752 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:02 vm09 ceph-mon[49358]: from='client.? 
192.168.123.105:0/3043669182' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/3348665640"}]': finished 2026-03-09T15:34:02.752 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:02 vm09 ceph-mon[49358]: osdmap e84: 8 total, 8 up, 8 in 2026-03-09T15:34:02.752 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:02 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/981967958' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/442745881"}]: dispatch 2026-03-09T15:34:02.752 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:02 vm09 ceph-mon[49358]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/442745881"}]: dispatch 2026-03-09T15:34:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:02 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/3043669182' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/3348665640"}]': finished 2026-03-09T15:34:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:02 vm05 ceph-mon[49764]: osdmap e84: 8 total, 8 up, 8 in 2026-03-09T15:34:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:02 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/981967958' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/442745881"}]: dispatch 2026-03-09T15:34:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:02 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/442745881"}]: dispatch 2026-03-09T15:34:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:02 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3043669182' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/3348665640"}]': finished 2026-03-09T15:34:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:02 vm05 ceph-mon[54361]: osdmap e84: 8 total, 8 up, 8 in 2026-03-09T15:34:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:02 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/981967958' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/442745881"}]: dispatch 2026-03-09T15:34:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:02 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/442745881"}]: dispatch 2026-03-09T15:34:03.964 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:03 vm09 ceph-mon[49358]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:03.964 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:03 vm09 ceph-mon[49358]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/442745881"}]': finished 2026-03-09T15:34:03.964 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:03 vm09 ceph-mon[49358]: osdmap e85: 8 total, 8 up, 8 in 2026-03-09T15:34:04.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:03 vm05 ceph-mon[49764]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:04.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:03 vm05 ceph-mon[49764]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/442745881"}]': finished 2026-03-09T15:34:04.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:03 vm05 ceph-mon[49764]: osdmap e85: 8 total, 8 up, 8 in 2026-03-09T15:34:04.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:03 vm05 ceph-mon[54361]: pgmap v20: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:04.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:03 vm05 ceph-mon[54361]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/442745881"}]': finished 2026-03-09T15:34:04.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:03 vm05 ceph-mon[54361]: osdmap e85: 8 total, 8 up, 8 in 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 systemd[1]: Stopping Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:34:04.134Z caller=main.go:775 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:34:04.134Z caller=main.go:798 level=info msg="Stopping scrape discovery manager..." 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:34:04.134Z caller=main.go:812 level=info msg="Stopping notify discovery manager..." 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:34:04.134Z caller=main.go:834 level=info msg="Stopping scrape manager..." 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:34:04.134Z caller=main.go:794 level=info msg="Scrape discovery manager stopped" 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:34:04.134Z caller=main.go:808 level=info msg="Notify discovery manager stopped" 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:34:04.134Z caller=manager.go:945 level=info component="rule manager" msg="Stopping rule manager..." 
2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:34:04.134Z caller=manager.go:955 level=info component="rule manager" msg="Rule manager stopped" 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:34:04.134Z caller=main.go:828 level=info msg="Scrape manager stopped" 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:34:04.134Z caller=notifier.go:600 level=info component=notifier msg="Stopping notification manager..." 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:34:04.134Z caller=main.go:1054 level=info msg="Notifier manager stopped" 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[67305]: ts=2026-03-09T15:34:04.135Z caller=main.go:1066 level=info msg="See you next time!" 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 podman[72053]: 2026-03-09 15:34:04.145435248 +0000 UTC m=+0.023794198 container died d95368d63cb2b6c8dbbdd0d041e93933a78be915b2de0ca6d2ff08b1b2d29399 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 podman[72053]: 2026-03-09 15:34:04.160057105 +0000 UTC m=+0.038416055 container remove d95368d63cb2b6c8dbbdd0d041e93933a78be915b2de0ca6d2ff08b1b2d29399 (image=quay.io/prometheus/prometheus:v2.33.4, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 bash[72053]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@prometheus.a.service: Deactivated successfully. 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 systemd[1]: Stopped Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:04.407 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 systemd[1]: Starting Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:34:04.795 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 podman[72160]: 2026-03-09 15:34:04.526047211 +0000 UTC m=+0.022816935 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-09T15:34:04.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:04 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:04.505Z caller=cluster.go:698 level=info component=cluster msg="gossip settled; proceeding" elapsed=10.002342024s 2026-03-09T15:34:04.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:04 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:04.511Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:04.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:04 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:04.511Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 podman[72160]: 2026-03-09 15:34:04.794530202 +0000 UTC m=+0.291299925 container create bd1ff20936ae321db19ac40fc96e1fd5dda76d2d814c8b0e8a9e45d9dddd53f3 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 podman[72160]: 2026-03-09 15:34:04.867312145 +0000 UTC m=+0.364081868 container init bd1ff20936ae321db19ac40fc96e1fd5dda76d2d814c8b0e8a9e45d9dddd53f3 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 podman[72160]: 2026-03-09 15:34:04.870319014 +0000 UTC m=+0.367088738 container start bd1ff20936ae321db19ac40fc96e1fd5dda76d2d814c8b0e8a9e45d9dddd53f3 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 bash[72160]: bd1ff20936ae321db19ac40fc96e1fd5dda76d2d814c8b0e8a9e45d9dddd53f3 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 systemd[1]: Started Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
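At this point cephadm has stopped prometheus.a, pulled quay.io/prometheus/prometheus:v2.51.0, and started a new container for it as part of the upgrade. A minimal sketch of how the redeploy could be verified from the cluster, assuming a working admin keyring on one of the nodes (these checks are illustrative and are not part of this job's task list):

    # Hedged sketch: confirm the redeployed monitoring daemon and its image (illustrative only).
    ceph orch ps | grep prometheus            # daemon status and image as reported by cephadm
    sudo podman ps --filter name=prometheus   # on vm09: the running container and its image tag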
2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.905Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)" 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.905Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)" 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.905Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm09 (none))" 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.905Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.905Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.917Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.918Z caller=main.go:1129 level=info msg="Starting TSDB ..." 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.919Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.919Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." 
http2=false address=[::]:9095 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.921Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.921Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.422µs 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.921Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.932Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=2 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.956Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=2 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.956Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=2 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.956Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=16.682µs wal_replay_duration=35.391442ms wbl_replay_duration=150ns total_replay_duration=35.422019ms 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.958Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.959Z caller=main.go:1153 level=info msg="TSDB started" 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.959Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.975Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=16.467069ms db_storage=882ns remote_storage=992ns web_handler=440ns query_engine=942ns scrape=551.298µs scrape_sd=92.141µs notify=7.844µs notify_sd=6.283µs rules=15.480278ms tracing=6.753µs 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.976Z caller=main.go:1114 
level=info msg="Server is ready to receive web requests." 2026-03-09T15:34:05.064 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:04.976Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..." 2026-03-09T15:34:05.522 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 systemd[1]: Stopping Ceph grafana.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:34:05.522 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:34:05+0000 lvl=info msg="Shutdown started" logger=server reason="System signal: terminated" 2026-03-09T15:34:05.522 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[64344]: t=2026-03-09T15:34:05+0000 lvl=info msg="Database locked, sleeping then retrying" logger=sqlstore error="database is locked" retry=0 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 podman[72299]: 2026-03-09 15:34:05.53402798 +0000 UTC m=+0.044029827 container died 3727d7279dc9a551fdbc1f0edbf538c16f8a9429809ea4974c28c752717fe905 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a, description=Ceph Grafana Container, io.openshift.expose-services=, io.openshift.tags=base rhel8, release=236.1648460182, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vendor=Red Hat, Inc., com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, io.buildah.version=1.24.2, summary=Grafana Container configured for Ceph mgr/dashboard integration, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, architecture=x86_64, vcs-type=git, io.k8s.display-name=Red Hat Universal Base Image 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, com.redhat.component=ubi8-container, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., version=8.5, build-date=2022-03-28T10:36:18.413762, distribution-scope=public, maintainer=Paul Cuzner , name=ubi8) 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 podman[72299]: 2026-03-09 15:34:05.555300995 +0000 UTC m=+0.065302842 container remove 3727d7279dc9a551fdbc1f0edbf538c16f8a9429809ea4974c28c752717fe905 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a, io.k8s.display-name=Red Hat Universal Base Image 8, vcs-type=git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, version=8.5, io.openshift.tags=base rhel8, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, architecture=x86_64, summary=Grafana Container configured for Ceph mgr/dashboard integration, io.buildah.version=1.24.2, name=ubi8, com.redhat.component=ubi8-container, maintainer=Paul Cuzner , vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, description=Ceph Grafana Container, release=236.1648460182, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, build-date=2022-03-28T10:36:18.413762) 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 bash[72299]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 bash[72318]: Error: no container with name or ID "ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana.a" found: no such container 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@grafana.a.service: Deactivated successfully. 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 systemd[1]: Stopped Ceph grafana.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@grafana.a.service: Consumed 1.441s CPU time. 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 systemd[1]: Starting Ceph grafana.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 podman[72357]: 2026-03-09 15:34:05.711833603 +0000 UTC m=+0.016957855 container create 6a58314a043eafb8f678557db139e544375686d49a492939138bab881ae891b3 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base rhel8, name=ubi8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.k8s.display-name=Red Hat Universal Base Image 8, release=236.1648460182, com.redhat.component=ubi8-container, summary=Grafana Container configured for Ceph mgr/dashboard integration, maintainer=Paul Cuzner , description=Ceph Grafana Container, build-date=2022-03-28T10:36:18.413762, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, vendor=Red Hat, Inc., vcs-type=git, io.buildah.version=1.24.2, architecture=x86_64, version=8.5, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, distribution-scope=public, io.openshift.expose-services=) 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 podman[72357]: 2026-03-09 15:34:05.752756554 +0000 UTC m=+0.057880817 container init 6a58314a043eafb8f678557db139e544375686d49a492939138bab881ae891b3 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a, distribution-scope=public, vcs-type=git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., architecture=x86_64, version=8.5, name=ubi8, io.openshift.tags=base rhel8, io.openshift.expose-services=, vendor=Red Hat, Inc., com.redhat.component=ubi8-container, summary=Grafana Container configured for Ceph mgr/dashboard integration, description=Ceph Grafana Container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, build-date=2022-03-28T10:36:18.413762, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Paul Cuzner , vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, release=236.1648460182, io.buildah.version=1.24.2, io.k8s.display-name=Red Hat Universal Base Image 8, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI) 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 podman[72357]: 2026-03-09 15:34:05.75612091 +0000 UTC m=+0.061245152 container start 6a58314a043eafb8f678557db139e544375686d49a492939138bab881ae891b3 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, io.openshift.tags=base rhel8, com.redhat.component=ubi8-container, vcs-type=git, io.buildah.version=1.24.2, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., summary=Grafana Container configured for Ceph mgr/dashboard integration, io.openshift.expose-services=, name=ubi8, description=Ceph Grafana Container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, io.k8s.display-name=Red Hat Universal Base Image 8, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, vendor=Red Hat, Inc., architecture=x86_64, version=8.5, maintainer=Paul Cuzner , build-date=2022-03-28T10:36:18.413762, release=236.1648460182) 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 bash[72357]: 6a58314a043eafb8f678557db139e544375686d49a492939138bab881ae891b3 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 podman[72357]: 2026-03-09 15:34:05.705091516 +0000 UTC m=+0.010215778 image pull dad864ee21e98e69f4029d1e417aa085001566be0d322fbc75bc6f29b0050c01 quay.io/ceph/ceph-grafana:8.3.5 2026-03-09T15:34:05.799 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 systemd[1]: Started Ceph grafana.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="The state of unified alerting is still not defined. The decision will be made during as we run the database migrations" logger=settings 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=warn msg="falling back to legacy setting of 'min_interval_seconds'; please use the configuration option in the `unified_alerting` section if Grafana 8 alerts are enabled." 
logger=settings 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Config loaded from" logger=settings file=/usr/share/grafana/conf/defaults.ini 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Config loaded from" logger=settings file=/etc/grafana/grafana.ini 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_DATA=/var/lib/grafana" 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_LOGS=/var/log/grafana" 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PLUGINS=/var/lib/grafana/plugins" 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Config overridden from Environment variable" logger=settings var="GF_PATHS_PROVISIONING=/etc/grafana/provisioning" 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Path Home" logger=settings path=/usr/share/grafana 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Path Data" logger=settings path=/var/lib/grafana 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Path Logs" logger=settings path=/var/log/grafana 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Path Plugins" logger=settings path=/var/lib/grafana/plugins 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Path Provisioning" logger=settings path=/etc/grafana/provisioning 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="App mode production" logger=settings 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Connecting to DB" logger=sqlstore dbtype=sqlite3 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: 
t=2026-03-09T15:34:05+0000 lvl=warn msg="SQLite database file has broader permissions than it should" logger=sqlstore path=/var/lib/grafana/grafana.db mode=-rw-r--r-- expected=-rw-r----- 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Starting DB migrations" logger=migrator 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="migrations completed" logger=migrator performed=0 skipped=377 duration=353.649µs 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Created default organization" logger=sqlstore 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Initialising plugins" logger=plugin.manager 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=input 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=grafana-piechart-panel 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Plugin registered" logger=plugin.manager pluginId=vonage-status-panel 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="Live Push Gateway initialization" logger=live.push_http 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="deleted datasource based on configuration" logger=provisioning.datasources name=Dashboard1 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Dashboard1 uid=P43CA22E17D0F9596 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="inserting datasource from configuration " logger=provisioning.datasources name=Loki uid=P8E80F9AEF21F6940 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="warming cache for startup" logger=ngalert 2026-03-09T15:34:06.063 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="starting MultiOrg Alertmanager" logger=ngalert.multiorg.alertmanager 2026-03-09T15:34:06.063 
INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:34:05+0000 lvl=info msg="HTTP Server Listen" logger=http.server address=[::]:3000 protocol=https subUrl= socket= 2026-03-09T15:34:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1020 B/s rd, 0 op/s 2026-03-09T15:34:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: Reconfiguring grafana.a (dependencies changed)... 2026-03-09T15:34:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-09T15:34:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: Reconfiguring daemon grafana.a on vm09 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm05.local:9093"}]: dispatch 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 
192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:05 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.064 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:05 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:05] ENGINE Bus STOPPING 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1020 B/s rd, 0 op/s 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: Reconfiguring grafana.a (dependencies changed)... 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: Reconfiguring daemon grafana.a on vm09 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm05.local:9093"}]: dispatch 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: pgmap v22: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1020 
B/s rd, 0 op/s 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: Reconfiguring grafana.a (dependencies changed)... 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: Regenerating cephadm self-signed grafana TLS certificates 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch 2026-03-09T15:34:06.184 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: Reconfiguring daemon grafana.a on vm09 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm05.local:9093"}]: dispatch 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: 
dispatch 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-09T15:34:06.185 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:05 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:06.562 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T15:34:06.562 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE Bus STOPPED 2026-03-09T15:34:06.562 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE Bus STARTING 2026-03-09T15:34:06.562 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE Serving on http://:::9283 2026-03-09T15:34:06.562 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE Bus STARTED 2026-03-09T15:34:06.562 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE Bus STOPPING 2026-03-09T15:34:06.751 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:06 vm05 systemd[1]: Stopping Ceph mgr.y for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='client.24949 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm05.local:9093"}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: Adding iSCSI gateway http://:@192.168.123.105:5000 to Dashboard 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: Upgrade: Updating mgr.y 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[49764]: Deploying daemon mgr.y on vm05 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:06 vm05 podman[82676]: 2026-03-09 15:34:06.869308132 +0000 UTC m=+0.060041085 container died 25a6783f54e5b8e9b5969e0bf3d9a55053504e4634ffff2077a7a6fc2b680b48 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y, io.openshift.expose-services=, name=centos-stream, GIT_BRANCH=HEAD, GIT_CLEAN=True, com.redhat.component=centos-stream-container, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, 
distribution-scope=public, io.openshift.tags=base centos centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, version=8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, RELEASE=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , GIT_REPO=https://github.com/ceph/ceph-container.git, vendor=Red Hat, Inc., io.k8s.display-name=CentOS Stream 8, vcs-type=git, ceph=True, CEPH_POINT_RELEASE=-17.2.0, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, release=754, architecture=x86_64, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, build-date=2022-05-03T08:36:31.336870) 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:06 vm05 podman[82676]: 2026-03-09 15:34:06.8994741 +0000 UTC m=+0.090207052 container remove 25a6783f54e5b8e9b5969e0bf3d9a55053504e4634ffff2077a7a6fc2b680b48 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y, io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0, ceph=True, architecture=x86_64, GIT_REPO=https://github.com/ceph/ceph-container.git, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.buildah.version=1.19.8, vendor=Red Hat, Inc., version=8, build-date=2022-05-03T08:36:31.336870, io.openshift.expose-services=, maintainer=Guillaume Abrioux , GIT_CLEAN=True, name=centos-stream, GIT_BRANCH=HEAD, io.openshift.tags=base centos centos-stream, com.redhat.component=centos-stream-container, release=754, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, distribution-scope=public, RELEASE=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, vcs-type=git, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.) 
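The mgr.y container running the old quay.io/ceph/ceph:v17.2.0 image has just been stopped and removed so that cephadm can redeploy the standby manager on the target image ("Upgrade: Updating mgr.y" above). A hedged sketch of commands that could be used to watch the manager daemons while this happens, assuming cluster admin access (illustrative; not part of the job definition):

    # Hedged sketch: observe mgr daemons during the upgrade step (illustrative only).
    ceph mgr stat            # which mgr is currently active and how many standbys exist
    ceph orch ps | grep mgr  # per-daemon status and container image as seen by cephadm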
2026-03-09T15:34:07.011 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:06 vm05 bash[82676]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:06 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.y.service: Main process exited, code=exited, status=143/n/a 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:06 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.y.service: Failed with result 'exit-code'. 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:06 vm05 systemd[1]: Stopped Ceph mgr.y for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:07.011 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:06 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.y.service: Consumed 33.711s CPU time. 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='client.24949 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm05.local:9093"}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: Adding iSCSI gateway http://:@192.168.123.105:5000 to Dashboard 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: Upgrade: Updating mgr.y 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:07.012 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:06 vm05 ceph-mon[54361]: Deploying daemon mgr.y on vm05 2026-03-09T15:34:07.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T15:34:07.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE Bus STOPPED 2026-03-09T15:34:07.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE Bus STARTING 2026-03-09T15:34:07.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE Serving on http://:::9283 2026-03-09T15:34:07.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE Bus STARTED 2026-03-09T15:34:07.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE Bus STOPPING 2026-03-09T15:34:07.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T15:34:07.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 
[09/Mar/2026:15:34:06] ENGINE Bus STOPPED 2026-03-09T15:34:07.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE Bus STARTING 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE Serving on http://:::9283 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:06] ENGINE Bus STARTED 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='client.24949 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm05.local:9093"}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: Adding iSCSI gateway http://:@192.168.123.105:5000 to Dashboard 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://host.containers.internal:9095"}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mon.? -' entity='mon.' 
cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://host.containers.internal:3000"}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: Upgrade: Updating mgr.y 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:06 vm09 ceph-mon[49358]: Deploying daemon mgr.y on vm05 2026-03-09T15:34:07.277 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:07 vm05 systemd[1]: Starting Ceph mgr.y for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:34:07.694 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:07 vm05 podman[82786]: 2026-03-09 15:34:07.276034326 +0000 UTC m=+0.020771227 container create db0211ba824d7b7b80e6d4f1aaf6b6fdac39f2cddfaf4ee431bf599e7a2f3901 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:34:07.694 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:07 vm05 podman[82786]: 2026-03-09 15:34:07.329050121 +0000 UTC m=+0.073787031 container init db0211ba824d7b7b80e6d4f1aaf6b6fdac39f2cddfaf4ee431bf599e7a2f3901 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:34:07.694 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:07 vm05 podman[82786]: 2026-03-09 15:34:07.331846783 +0000 UTC m=+0.076583693 container start db0211ba824d7b7b80e6d4f1aaf6b6fdac39f2cddfaf4ee431bf599e7a2f3901 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:34:07.694 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:07 vm05 bash[82786]: db0211ba824d7b7b80e6d4f1aaf6b6fdac39f2cddfaf4ee431bf599e7a2f3901 2026-03-09T15:34:07.694 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:07 vm05 podman[82786]: 2026-03-09 15:34:07.263976271 +0000 UTC m=+0.008713191 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:34:07.694 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:07 vm05 systemd[1]: Started 
Ceph mgr.y for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:07.694 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:07 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:07.446+0000 7fdad5190140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T15:34:07.694 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:07 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:07.502+0000 7fdad5190140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T15:34:08.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:07.997+0000 7fdad5190140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:34:08.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:08 vm05 ceph-mon[49764]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-09T15:34:08.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:08 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:08.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:08 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:08.487 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:08.388+0000 7fdad5190140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T15:34:08.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:08 vm05 ceph-mon[54361]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-09T15:34:08.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:08 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:08.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:08 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:08.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:08 vm09 ceph-mon[49358]: pgmap v23: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-09T15:34:08.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:08 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:08.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:08 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:08.816 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T15:34:08.816 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-09T15:34:08.816 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: from numpy import show_config as show_numpy_config 2026-03-09T15:34:08.816 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:08.514+0000 7fdad5190140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:34:08.816 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:08.559+0000 7fdad5190140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:34:08.816 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:08.645+0000 7fdad5190140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:34:09.472 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:09 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:09.337+0000 7fdad5190140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:34:09.472 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:09 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:09.465+0000 7fdad5190140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:34:09.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:09 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:09.510+0000 7fdad5190140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:34:09.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:09 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:09.550+0000 7fdad5190140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:34:09.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:09 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:09.599+0000 7fdad5190140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:34:09.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:09 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:09.646+0000 7fdad5190140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:34:10.157 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[49764]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:10.157 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.157 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.157 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.157 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.157 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.157 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.158 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:09 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 
2026-03-09T15:34:09.850+0000 7fdad5190140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:34:10.158 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:09 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:09.912+0000 7fdad5190140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:34:10.158 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[54361]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:10.158 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.158 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.158 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.158 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.158 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.158 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:10 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:10 vm09 ceph-mon[49358]: pgmap v24: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:10.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:10 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:10 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:10 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:10 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:10 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:10 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:10.409 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:10 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:10.184+0000 7fdad5190140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T15:34:10.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:10 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:10.554+0000 7fdad5190140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:34:10.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:10 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:10.602+0000 7fdad5190140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T15:34:10.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:10 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:10.651+0000 7fdad5190140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:34:11.005 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:10 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:10.743+0000 7fdad5190140 -1 mgr[py] Module status has 
missing NOTIFY_TYPES member 2026-03-09T15:34:11.005 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:10 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:10.784+0000 7fdad5190140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:34:11.005 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:10 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:10.881+0000 7fdad5190140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:34:11.005 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:11 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:11.001+0000 7fdad5190140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:34:11.311 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:11 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:34:11.312 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:11 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:11.150+0000 7fdad5190140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T15:34:11.312 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:11 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:11.195+0000 7fdad5190140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:34:11.312 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:11 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:11] ENGINE Bus STARTING 2026-03-09T15:34:11.312 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:11 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: CherryPy Checker: 2026-03-09T15:34:11.312 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:11 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: The Application mounted at '' has an empty config. 
2026-03-09T15:34:11.312 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:11 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:11.312 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:11 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:11] ENGINE Serving on http://:::9283 2026-03-09T15:34:11.312 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:11 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:34:11.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:11 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:34:11.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:11 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:11] ENGINE Bus STARTED 2026-03-09T15:34:12.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: Standby manager daemon y restarted 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: Standby manager daemon y started 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: from='mgr.? 
192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: Failing over to other MGR 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: osdmap e86: 8 total, 8 up, 8 in 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:12 vm09 ceph-mon[49358]: Standby manager daemon x started 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:12 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: ignoring --setuser ceph since I am not root 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:12 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: ignoring --setgroup ceph since I am not root 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:12 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:12.201+0000 7f03dfb43140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T15:34:12.313 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:12 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:12.257+0000 7f03dfb43140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: Standby manager daemon y restarted 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 
ceph-mon[49764]: Standby manager daemon y started 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: Failing over to other MGR 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: osdmap e86: 8 total, 8 up, 8 in 2026-03-09T15:34:12.319 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[49764]: Standby manager daemon x started 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:12 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:12] ENGINE Bus STOPPING 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: pgmap v25: 161 pgs: 161 active+clean; 457 KiB data, 99 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T15:34:12.320 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: Standby manager daemon y restarted 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: Standby manager daemon y started 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/crt"}]: dispatch 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/y/key"}]: dispatch 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: Upgrade: Need to upgrade myself (mgr.x) 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: Failing over to other MGR 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: from='mgr.24854 192.168.123.109:0/1424714283' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' cmd=[{"prefix": "mgr fail", "who": "x"}]: dispatch 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: osdmap e86: 8 total, 8 up, 8 in 2026-03-09T15:34:12.320 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:12 vm05 ceph-mon[54361]: Standby manager daemon x started 2026-03-09T15:34:12.653 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:12 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:12] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) 
shut down 2026-03-09T15:34:12.653 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:12 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:12] ENGINE Bus STOPPED 2026-03-09T15:34:12.654 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:12 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:12] ENGINE Bus STARTING 2026-03-09T15:34:12.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:12 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:12] ENGINE Serving on http://:::9283 2026-03-09T15:34:12.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:12 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:12] ENGINE Bus STARTED 2026-03-09T15:34:13.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.24854 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-09T15:34:13.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: mgrmap e26: y(active, starting, since 0.753331s), standbys: x 2026-03-09T15:34:13.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:34:13.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:34:13.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:34:13.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:34:13.064 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: Manager daemon y is now available 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:13 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:34:13.064 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:12 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:12.687+0000 7f03dfb43140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:34:13.314 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:13.082+0000 7f03dfb43140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T15:34:13.315 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T15:34:13.315 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-09T15:34:13.315 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: from numpy import show_config as show_numpy_config 2026-03-09T15:34:13.315 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:13.185+0000 7f03dfb43140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:34:13.315 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:13.231+0000 7f03dfb43140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.24854 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: mgrmap e26: y(active, starting, since 0.753331s), standbys: x 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 
192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: Manager daemon y is now available 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.24854 ' entity='mgr.x' cmd='[{"prefix": "mgr fail", "who": "x"}]': finished 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: mgrmap e26: y(active, starting, since 0.753331s), standbys: x 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 
15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:34:13.323 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:34:13.324 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:34:13.324 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:34:13.324 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:34:13.324 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:34:13.324 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:34:13.324 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:34:13.324 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:34:13.324 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: Manager daemon y is now available 2026-03-09T15:34:13.324 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:34:13.324 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:34:13.324 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:13 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:34:13.677 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:13.506Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:13.677 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:13 vm05 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:13.507Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:13.677 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:13.508Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:13.677 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:13.508Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:13.812 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:13.323+0000 7f03dfb43140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:34:14.162 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:13.920+0000 7f03dfb43140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:34:14.162 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:14.056+0000 7f03dfb43140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:34:14.162 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:14.098+0000 7f03dfb43140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:34:14.162 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:14.141+0000 7f03dfb43140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:34:14.484 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:14 vm09 ceph-mon[49358]: mgrmap e27: y(active, since 2s), standbys: x 2026-03-09T15:34:14.484 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:14 vm09 ceph-mon[49358]: [09/Mar/2026:15:34:13] ENGINE Bus STARTING 2026-03-09T15:34:14.484 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:14 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:14.484 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:14 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:14.485 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:14.204+0000 7f03dfb43140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:34:14.485 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 
15:34:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:14.243+0000 7f03dfb43140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:34:14.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:14 vm05 ceph-mon[49764]: mgrmap e27: y(active, since 2s), standbys: x 2026-03-09T15:34:14.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:14 vm05 ceph-mon[49764]: [09/Mar/2026:15:34:13] ENGINE Bus STARTING 2026-03-09T15:34:14.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:14 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:14.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:14 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:14.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:14 vm05 ceph-mon[54361]: mgrmap e27: y(active, since 2s), standbys: x 2026-03-09T15:34:14.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:14 vm05 ceph-mon[54361]: [09/Mar/2026:15:34:13] ENGINE Bus STARTING 2026-03-09T15:34:14.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:14 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:14.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:14 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:14.813 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:14.482+0000 7f03dfb43140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:34:14.813 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:14.544+0000 7f03dfb43140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:34:15.165 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:14.830+0000 7f03dfb43140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T15:34:15.165 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:15.163+0000 7f03dfb43140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:34:15.507 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:15 vm09 ceph-mon[49358]: [09/Mar/2026:15:34:13] ENGINE Serving on http://192.168.123.105:8765 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:15 vm09 ceph-mon[49358]: [09/Mar/2026:15:34:13] ENGINE Serving on https://192.168.123.105:7150 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:15 vm09 ceph-mon[49358]: [09/Mar/2026:15:34:13] ENGINE Bus STARTED 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:15 vm09 ceph-mon[49358]: [09/Mar/2026:15:34:13] ENGINE Client ('192.168.123.105', 40154) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:15 vm09 ceph-mon[49358]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:15 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.508 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:15 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:15 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:15 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:15 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:15 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:15 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:15.207+0000 7f03dfb43140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:15.270+0000 7f03dfb43140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:15.367+0000 7f03dfb43140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T15:34:15.508 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:15.413+0000 7f03dfb43140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:34:15.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[49764]: [09/Mar/2026:15:34:13] ENGINE Serving on http://192.168.123.105:8765 2026-03-09T15:34:15.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[49764]: [09/Mar/2026:15:34:13] ENGINE Serving on https://192.168.123.105:7150 2026-03-09T15:34:15.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[49764]: [09/Mar/2026:15:34:13] ENGINE Bus STARTED 2026-03-09T15:34:15.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[49764]: [09/Mar/2026:15:34:13] ENGINE Client ('192.168.123.105', 40154) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T15:34:15.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[49764]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:15.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:15 vm05 
ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:34:15.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[54361]: [09/Mar/2026:15:34:13] ENGINE Serving on http://192.168.123.105:8765 2026-03-09T15:34:15.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[54361]: [09/Mar/2026:15:34:13] ENGINE Serving on https://192.168.123.105:7150 2026-03-09T15:34:15.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[54361]: [09/Mar/2026:15:34:13] ENGINE Bus STARTED 2026-03-09T15:34:15.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[54361]: [09/Mar/2026:15:34:13] ENGINE Client ('192.168.123.105', 40154) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T15:34:15.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[54361]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:15.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:15.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:15 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:34:15.798 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:15.505+0000 7f03dfb43140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:34:15.798 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:15.633+0000 7f03dfb43140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:34:16.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:15.797+0000 7f03dfb43140 -1 mgr[py] Module volumes 
has missing NOTIFY_TYPES member 2026-03-09T15:34:16.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:15.843+0000 7f03dfb43140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:34:16.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:15] ENGINE Bus STARTING 2026-03-09T15:34:16.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: CherryPy Checker: 2026-03-09T15:34:16.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: The Application mounted at '' has an empty config. 2026-03-09T15:34:16.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: 2026-03-09T15:34:16.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:15] ENGINE Serving on http://:::9283 2026-03-09T15:34:16.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[68905]: [09/Mar/2026:15:34:15] ENGINE Bus STARTED 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[49764]: Standby manager daemon x restarted 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[49764]: Standby manager daemon x started 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/1296039047' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/1296039047' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/1296039047' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[49764]: from='mgr.? 
192.168.123.109:0/1296039047' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[49764]: mgrmap e28: y(active, since 4s), standbys: x 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[54361]: Standby manager daemon x restarted 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[54361]: Standby manager daemon x started 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/1296039047' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/1296039047' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/1296039047' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[54361]: from='mgr.? 
192.168.123.109:0/1296039047' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[54361]: mgrmap e28: y(active, since 4s), standbys: x 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:16.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:16 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:16.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:16 vm09 ceph-mon[49358]: Standby manager daemon x restarted 2026-03-09T15:34:16.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:16 vm09 ceph-mon[49358]: Standby manager daemon x started 2026-03-09T15:34:16.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:16 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/1296039047' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:34:16.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:16 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/1296039047' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:34:16.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:16 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/1296039047' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:34:16.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:16 vm09 ceph-mon[49358]: from='mgr.? 
192.168.123.109:0/1296039047' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:34:16.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:16 vm09 ceph-mon[49358]: mgrmap e28: y(active, since 4s), standbys: x 2026-03-09T15:34:16.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:16 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:16.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:16 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:16.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:16 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:34:16.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:16 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:16.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:16 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:17.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:16.947Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: from='client.24949 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB 
avail 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: from='client.24949 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:34:17.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:34:17.488 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:34:17.488 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.488 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.488 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.488 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.488 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.488 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:34:17.488 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:17 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: from='client.24949 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: pgmap v5: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: Updating vm05:/etc/ceph/ceph.client.admin.keyring 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: 
from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:34:17.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:17 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:18.345 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 systemd[1]: Stopping Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:34:18.415 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:18 vm09 ceph-mon[49358]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.client.admin.keyring 2026-03-09T15:34:18.415 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:18 vm09 ceph-mon[49358]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.client.admin.keyring 2026-03-09T15:34:18.415 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:18 vm09 ceph-mon[49358]: Reconfiguring iscsi.foo.vm05.rfsich (dependencies changed)... 2026-03-09T15:34:18.415 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:18 vm09 ceph-mon[49358]: Reconfiguring daemon iscsi.foo.vm05.rfsich on vm05 2026-03-09T15:34:18.415 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:18 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:18.415 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:18 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:18.415 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:18 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/973556961' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[49764]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.client.admin.keyring 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[49764]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.client.admin.keyring 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[49764]: Reconfiguring iscsi.foo.vm05.rfsich (dependencies changed)... 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[49764]: Reconfiguring daemon iscsi.foo.vm05.rfsich on vm05 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/973556961' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[54361]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.client.admin.keyring 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[54361]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.client.admin.keyring 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[54361]: Reconfiguring iscsi.foo.vm05.rfsich (dependencies changed)... 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[54361]: Reconfiguring daemon iscsi.foo.vm05.rfsich on vm05 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:18.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:18 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/973556961' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T15:34:18.667 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:18.413Z caller=main.go:964 level=warn msg="Received SIGTERM, exiting gracefully..." 2026-03-09T15:34:18.667 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:18.413Z caller=main.go:988 level=info msg="Stopping scrape discovery manager..." 2026-03-09T15:34:18.667 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:18.413Z caller=main.go:1002 level=info msg="Stopping notify discovery manager..." 2026-03-09T15:34:18.667 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:18.413Z caller=manager.go:177 level=info component="rule manager" msg="Stopping rule manager..." 2026-03-09T15:34:18.667 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:18.413Z caller=manager.go:187 level=info component="rule manager" msg="Rule manager stopped" 2026-03-09T15:34:18.667 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:18.413Z caller=main.go:1039 level=info msg="Stopping scrape manager..." 
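The CephNodeDiskspaceWarning evaluation failure logged at 15:34:16 above is a join problem rather than a disk problem: the error itself lists two node_uname_info series for instance="vm09", one carrying the cluster="452f6a00-1bcc-11f1-a1ee-7f1a2af01dea" label and one without it, so the rule's "on (instance) group_left (nodename)" match is no longer one-to-one. A minimal sketch for listing the clashing series over the standard Prometheus HTTP API, assuming the vm09.local:9095 endpoint this run uses is reachable from the test node:

# Sketch only: show the duplicate node_uname_info series behind the
# "many-to-many matching not allowed" rule-evaluation error.
import json
import urllib.parse
import urllib.request

PROM = "http://vm09.local:9095"  # assumption: Prometheus address used in this run
params = urllib.parse.urlencode({"match[]": 'node_uname_info{instance="vm09"}'})
with urllib.request.urlopen(f"{PROM}/api/v1/series?{params}") as resp:
    for labels in json.load(resp)["data"]:
        # one series carries cluster=..., the stale duplicate does not
        print(sorted(labels.items()))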
2026-03-09T15:34:18.667 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:18.413Z caller=main.go:984 level=info msg="Scrape discovery manager stopped" 2026-03-09T15:34:18.667 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:18.413Z caller=main.go:998 level=info msg="Notify discovery manager stopped" 2026-03-09T15:34:18.667 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:18.413Z caller=main.go:1031 level=info msg="Scrape manager stopped" 2026-03-09T15:34:18.668 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:18.415Z caller=notifier.go:618 level=info component=notifier msg="Stopping notification manager..." 2026-03-09T15:34:18.668 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:18.415Z caller=main.go:1261 level=info msg="Notifier manager stopped" 2026-03-09T15:34:18.668 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[72170]: ts=2026-03-09T15:34:18.415Z caller=main.go:1273 level=info msg="See you next time!" 2026-03-09T15:34:18.668 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 podman[74211]: 2026-03-09 15:34:18.426839754 +0000 UTC m=+0.031768915 container died bd1ff20936ae321db19ac40fc96e1fd5dda76d2d814c8b0e8a9e45d9dddd53f3 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:34:18.668 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 podman[74211]: 2026-03-09 15:34:18.443886845 +0000 UTC m=+0.048816006 container remove bd1ff20936ae321db19ac40fc96e1fd5dda76d2d814c8b0e8a9e45d9dddd53f3 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:34:18.668 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 bash[74211]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a 2026-03-09T15:34:18.668 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@prometheus.a.service: Deactivated successfully. 2026-03-09T15:34:18.668 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 systemd[1]: Stopped Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:18.668 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 systemd[1]: Starting Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
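The stream of "Module <name> has missing NOTIFY_TYPES member" messages from mgr.x earlier in this excerpt comes from the newer mgr loading modules that do not declare which cluster-map notifications they consume; the modules still load and the upgrade continues. A minimal sketch of the declaration that avoids the warning, assuming the squid-era mgr_module API; the module name and notification types here are illustrative, not taken from this run:

# Sketch only: an mgr module advertising its NOTIFY_TYPES (squid-era API assumed).
from typing import List
from mgr_module import MgrModule, NotifyType

class Example(MgrModule):
    # declare which notifications this module wants; an empty list is also valid
    NOTIFY_TYPES: List[NotifyType] = [NotifyType.mon_map, NotifyType.osd_map]

    def notify(self, notify_type, notify_id):
        self.log.debug("got %s notification (%s)", notify_type, notify_id)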
2026-03-09T15:34:18.668 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 podman[74280]: 2026-03-09 15:34:18.637020803 +0000 UTC m=+0.019877573 container create 737f11649a72bc079b63c01e380a721de05a6f0a0a80dbaa8f0ae4cbdbd15a55 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:34:18.668 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 podman[74280]: 2026-03-09 15:34:18.661953704 +0000 UTC m=+0.044810494 container init 737f11649a72bc079b63c01e380a721de05a6f0a0a80dbaa8f0ae4cbdbd15a55 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:34:18.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:18 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:18] ENGINE Bus STOPPING 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 podman[74280]: 2026-03-09 15:34:18.665592923 +0000 UTC m=+0.048449693 container start 737f11649a72bc079b63c01e380a721de05a6f0a0a80dbaa8f0ae4cbdbd15a55 (image=quay.io/prometheus/prometheus:v2.51.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a, maintainer=The Prometheus Authors ) 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 bash[74280]: 737f11649a72bc079b63c01e380a721de05a6f0a0a80dbaa8f0ae4cbdbd15a55 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 podman[74280]: 2026-03-09 15:34:18.628404309 +0000 UTC m=+0.011261088 image pull 1d3b7f56885b6dd623f1785be963aa9c195f86bc256ea454e8d02a7980b79c53 quay.io/prometheus/prometheus:v2.51.0 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 systemd[1]: Started Ceph prometheus.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
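At this point cephadm has stopped the prometheus.a container on vm09 and systemd has started a fresh one, so the new instance replays its WAL before it can serve queries again (the TSDB startup messages follow below). A small sketch that waits on the standard /-/ready endpoint before trusting the restarted server, assuming the same vm09.local:9095 address; the two-minute budget is arbitrary:

# Sketch only: poll Prometheus readiness after a cephadm redeploy.
import time
import urllib.error
import urllib.request

URL = "http://vm09.local:9095/-/ready"   # assumption: Prometheus address used in this run
deadline = time.monotonic() + 120        # arbitrary budget
while time.monotonic() < deadline:
    try:
        with urllib.request.urlopen(URL, timeout=5) as resp:
            if resp.status == 200:
                print("prometheus ready")
                break
    except (urllib.error.URLError, OSError):
        pass
    time.sleep(5)
else:
    raise SystemExit("prometheus did not become ready in time")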
2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.691Z caller=main.go:617 level=info msg="Starting Prometheus Server" mode=server version="(version=2.51.0, branch=HEAD, revision=c05c15512acb675e3f6cd662a6727854e93fc024)" 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.691Z caller=main.go:622 level=info build_context="(go=go1.22.1, platform=linux/amd64, user=root@b5723e458358, date=20240319-10:54:45, tags=netgo,builtinassets,stringlabels)" 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.691Z caller=main.go:623 level=info host_details="(Linux 5.14.0-686.el9.x86_64 #1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026 x86_64 vm09 (none))" 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.691Z caller=main.go:624 level=info fd_limits="(soft=1048576, hard=1048576)" 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.691Z caller=main.go:625 level=info vm_limits="(soft=unlimited, hard=unlimited)" 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.694Z caller=web.go:568 level=info component=web msg="Start listening for connections" address=:9095 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.695Z caller=main.go:1129 level=info msg="Starting TSDB ..." 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.696Z caller=tls_config.go:313 level=info component=web msg="Listening on" address=[::]:9095 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.696Z caller=tls_config.go:316 level=info component=web msg="TLS is disabled." 
http2=false address=[::]:9095 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.697Z caller=head.go:616 level=info component=tsdb msg="Replaying on-disk memory mappable chunks if any" 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.697Z caller=head.go:698 level=info component=tsdb msg="On-disk memory mappable chunks replay completed" duration=1.412µs 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.697Z caller=head.go:706 level=info component=tsdb msg="Replaying WAL, this may take a while" 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.710Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=0 maxSegment=3 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.725Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=1 maxSegment=3 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.726Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=2 maxSegment=3 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.727Z caller=head.go:778 level=info component=tsdb msg="WAL segment loaded" segment=3 maxSegment=3 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.727Z caller=head.go:815 level=info component=tsdb msg="WAL replay completed" checkpoint_replay_duration=137.317µs wal_replay_duration=29.184533ms wbl_replay_duration=131ns total_replay_duration=29.335484ms 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.728Z caller=main.go:1150 level=info fs_type=XFS_SUPER_MAGIC 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.728Z caller=main.go:1153 level=info msg="TSDB started" 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.728Z caller=main.go:1335 level=info msg="Loading configuration file" filename=/etc/prometheus/prometheus.yml 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.741Z caller=main.go:1372 level=info msg="Completed loading of configuration file" filename=/etc/prometheus/prometheus.yml totalDuration=12.439916ms db_storage=862ns remote_storage=1.042µs web_handler=301ns query_engine=792ns scrape=1.947082ms 
scrape_sd=69.038µs notify=6.832µs notify_sd=9.598µs rules=10.06223ms tracing=3.577µs 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.741Z caller=main.go:1114 level=info msg="Server is ready to receive web requests." 2026-03-09T15:34:19.002 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:18.741Z caller=manager.go:163 level=info component="rule manager" msg="Starting rule manager..." 2026-03-09T15:34:19.379 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:34:19.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T15:34:19.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: Reconfiguring daemon prometheus.a on vm09 2026-03-09T15:34:19.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:19.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:34:19.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:34:19.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:34:19.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm09.local:9095"}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' 
entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:19 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:19] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:19 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:19] ENGINE Bus STOPPED 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:19 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:19] ENGINE Bus STARTING 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:19 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:19] ENGINE Serving on http://:::9283 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:19 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:19] ENGINE Bus STARTED 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: Reconfiguring daemon prometheus.a on vm09 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' 
entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm09.local:9095"}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:34:19.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:19 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: Reconfiguring prometheus.a (dependencies changed)... 2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: Reconfiguring daemon prometheus.a on vm09 2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:34:19.615 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm09.local:9095"}]: dispatch
2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y'
2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch
2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch
2026-03-09T15:34:19.615 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:19 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (25s) 5s ago 5m 15.7M - 0.25.0 c8568f914cd2 93224b6bb99a
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (14s) 5s ago 5m 37.7M - dad864ee21e9 6a58314a043e
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 starting - - - -
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443,9283 running (44s) 5s ago 7m 128M - 19.2.3-678-ge911bdeb 654f31e6858e d5e0fdba3128
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:8443,9283,8765 running (12s) 5s ago 7m 528M - 19.2.3-678-ge911bdeb 654f31e6858e db0211ba824d
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (7m) 5s ago 7m 56.5M 2048M 17.2.0 e1d6a67b021e b31ac3c66976
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (7m) 5s ago 7m 49.6M 2048M 17.2.0 e1d6a67b021e 0fedbaac50c3
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (7m) 5s ago 7m 45.8M 2048M 17.2.0 e1d6a67b021e 43bd063ebab2
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (29s) 5s ago 5m 8958k - 1.7.0 72c9c2088986 e730a028339f
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (21s) 5s ago 5m 8757k - 1.7.0 72c9c2088986 a360ac0679f4
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (6m) 5s ago 6m 54.2M 4096M 17.2.0 e1d6a67b021e b143b061d0dd
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (6m) 5s ago 6m 52.4M 4096M 17.2.0 e1d6a67b021e 2277528e9f90
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (6m) 5s ago 6m 49.2M 4096M 17.2.0 e1d6a67b021e 21b53f2cd34c
2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (6m) 5s ago 6m 53.9M 4096M
17.2.0 e1d6a67b021e b4398847e195 2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (6m) 5s ago 6m 52.2M 4096M 17.2.0 e1d6a67b021e 00685022776e 2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (6m) 5s ago 6m 50.1M 4096M 17.2.0 e1d6a67b021e fbdec571623e 2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (6m) 5s ago 6m 50.3M 4096M 17.2.0 e1d6a67b021e ad01856a3458 2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (5m) 5s ago 5m 50.5M 4096M 17.2.0 e1d6a67b021e 3cc4727e5f07 2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 starting - - - - 2026-03-09T15:34:19.825 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (5m) 5s ago 5m 91.2M - 17.2.0 e1d6a67b021e 1d93c894d675 2026-03-09T15:34:19.826 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (5m) 5s ago 5m 91.0M - 17.2.0 e1d6a67b021e 5c4755297ad0 2026-03-09T15:34:19.826 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (5m) 5s ago 5m 92.6M - 17.2.0 e1d6a67b021e fe5812c6c74c 2026-03-09T15:34:19.826 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (5m) 5s ago 5m 89.7M - 17.2.0 e1d6a67b021e 7a967179b651 2026-03-09T15:34:19.934 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:19 vm09 systemd[1]: Stopping Ceph mgr.x for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:34:19.934 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:19 vm09 podman[74537]: 2026-03-09 15:34:19.733065834 +0000 UTC m=+0.060293458 container died d5e0fdba3128bbe7771eba33c9da20b451ae017e438617a3fb5f93d9a8177924 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-09T15:34:19.934 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:19 vm09 podman[74537]: 2026-03-09 15:34:19.761972859 +0000 UTC m=+0.089200483 container remove d5e0fdba3128bbe7771eba33c9da20b451ae017e438617a3fb5f93d9a8177924 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.41.3, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2) 2026-03-09T15:34:19.934 
INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:19 vm09 bash[74537]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x 2026-03-09T15:34:19.934 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:19 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.x.service: Main process exited, code=exited, status=143/n/a 2026-03-09T15:34:19.934 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:19 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.x.service: Failed with result 'exit-code'. 2026-03-09T15:34:19.934 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:19 vm09 systemd[1]: Stopped Ceph mgr.x for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:19.934 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:19 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.x.service: Consumed 12.076s CPU time. 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout: "mon": { 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 3 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout: "mgr": { 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout: "osd": { 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout: "mds": {}, 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": { 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-09T15:34:20.077 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:34:20.078 INFO:teuthology.orchestra.run.vm05.stdout: "overall": { 2026-03-09T15:34:20.078 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 15, 2026-03-09T15:34:20.078 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T15:34:20.078 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:34:20.078 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:34:20.313 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:20 vm09 systemd[1]: Starting Ceph mgr.x for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:34:20.313 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:20 vm09 podman[74646]: 2026-03-09 15:34:20.165342337 +0000 UTC m=+0.024580707 container create dd2d7e10f3aad955fe69185fd975b75decee9b70f87572b850b081396d5c0888 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223) 2026-03-09T15:34:20.313 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:20 vm09 podman[74646]: 2026-03-09 15:34:20.214164803 +0000 UTC m=+0.073403184 container init dd2d7e10f3aad955fe69185fd975b75decee9b70f87572b850b081396d5c0888 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:34:20.313 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:20 vm09 podman[74646]: 2026-03-09 15:34:20.217849186 +0000 UTC m=+0.077087556 container start dd2d7e10f3aad955fe69185fd975b75decee9b70f87572b850b081396d5c0888 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True) 2026-03-09T15:34:20.313 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:20 vm09 bash[74646]: dd2d7e10f3aad955fe69185fd975b75decee9b70f87572b850b081396d5c0888 2026-03-09T15:34:20.313 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:20 vm09 podman[74646]: 2026-03-09 15:34:20.155621062 +0000 UTC m=+0.014859441 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:34:20.313 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:20 vm09 systemd[1]: Started 
Ceph mgr.x for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:20.314 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:34:20.314 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T15:34:20.314 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true, 2026-03-09T15:34:20.314 INFO:teuthology.orchestra.run.vm05.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-09T15:34:20.314 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [], 2026-03-09T15:34:20.314 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "", 2026-03-09T15:34:20.314 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Currently upgrading mgr daemons", 2026-03-09T15:34:20.314 INFO:teuthology.orchestra.run.vm05.stdout: "is_paused": false 2026-03-09T15:34:20.314 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:34:20.362 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[49764]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:34:20.362 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[49764]: Adding iSCSI gateway http://:@192.168.123.105:5000 to Dashboard 2026-03-09T15:34:20.362 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[49764]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:34:20.362 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[49764]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:34:20.362 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[49764]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:34:20.362 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[49764]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm09.local:9095"}]: dispatch 2026-03-09T15:34:20.362 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[49764]: Upgrade: Updating mgr.x 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[49764]: Deploying daemon mgr.x on vm09 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[49764]: from='client.15165 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[49764]: from='client.? 192.168.123.105:0/2916444095' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[54361]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[54361]: Adding iSCSI gateway http://:@192.168.123.105:5000 to Dashboard 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[54361]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[54361]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[54361]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[54361]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm09.local:9095"}]: dispatch 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[54361]: Upgrade: Updating mgr.x 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[54361]: Deploying daemon mgr.x on vm09 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[54361]: from='client.15165 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/2916444095' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:20.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:20 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:20.594 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_OK 2026-03-09T15:34:20.645 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:20 vm09 ceph-mon[49358]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-list"}]: dispatch 2026-03-09T15:34:20.645 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:20 vm09 ceph-mon[49358]: Adding iSCSI gateway http://:@192.168.123.105:5000 to Dashboard 2026-03-09T15:34:20.645 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:20 vm09 ceph-mon[49358]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-iscsi-api-ssl-verification", "value": "true"}]: dispatch 2026-03-09T15:34:20.646 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:20 vm09 ceph-mon[49358]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard iscsi-gateway-add", "name": "vm05"}]: dispatch 2026-03-09T15:34:20.646 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:20 vm09 ceph-mon[49358]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-03-09T15:34:20.646 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:20 vm09 ceph-mon[49358]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm09.local:9095"}]: dispatch 2026-03-09T15:34:20.646 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:20 vm09 ceph-mon[49358]: Upgrade: Updating mgr.x 2026-03-09T15:34:20.646 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:20 vm09 ceph-mon[49358]: Deploying daemon mgr.x on vm09 2026-03-09T15:34:20.646 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:20 vm09 ceph-mon[49358]: from='client.15165 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:20.646 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:20 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/2916444095' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:20.646 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:20 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:20.646 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:20 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:20.647 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:20 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:20.340+0000 7fe3e7002140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T15:34:20.647 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:20 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:20.392+0000 7fe3e7002140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T15:34:21.312 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:20 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:20.939+0000 7fe3e7002140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:34:21.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:21 vm05 ceph-mon[49764]: from='client.15171 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:21.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:21 vm05 ceph-mon[49764]: from='client.25048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:21.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:21 vm05 ceph-mon[49764]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T15:34:21.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:21 vm05 ceph-mon[49764]: from='client.25057 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:21.487 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:21 vm05 ceph-mon[49764]: from='client.? 
192.168.123.105:0/3113857' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:34:21.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:21 vm05 ceph-mon[54361]: from='client.15171 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:21.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:21 vm05 ceph-mon[54361]: from='client.25048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:21.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:21 vm05 ceph-mon[54361]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T15:34:21.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:21 vm05 ceph-mon[54361]: from='client.25057 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:21.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:21 vm05 ceph-mon[54361]: from='client.? 192.168.123.105:0/3113857' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:34:21.617 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:21 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:21.348+0000 7fe3e7002140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T15:34:21.617 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:21 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T15:34:21.618 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:21 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-09T15:34:21.618 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:21 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: from numpy import show_config as show_numpy_config 2026-03-09T15:34:21.618 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:21 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:21.464+0000 7fe3e7002140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:34:21.618 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:21 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:21.516+0000 7fe3e7002140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:34:21.620 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:21 vm09 ceph-mon[49358]: from='client.15171 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:21.620 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:21 vm09 ceph-mon[49358]: from='client.25048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:21.620 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:21 vm09 ceph-mon[49358]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 27 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T15:34:21.621 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:21 vm09 ceph-mon[49358]: from='client.25057 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:21.621 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:21 vm09 ceph-mon[49358]: from='client.? 192.168.123.105:0/3113857' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:34:22.062 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:21 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:21.616+0000 7fe3e7002140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:34:22.455 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:22 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:22.186+0000 7fe3e7002140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:34:22.455 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:22 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:22.305+0000 7fe3e7002140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:34:22.455 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:22 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:22.351+0000 7fe3e7002140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:34:22.455 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:22 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:22.393+0000 7fe3e7002140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:34:22.722 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:22 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:22.451+0000 7fe3e7002140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:34:22.722 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:22 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:22.498+0000 7fe3e7002140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:34:22.978 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 
15:34:22 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:22.727+0000 7fe3e7002140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:34:22.978 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:22 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:22.810+0000 7fe3e7002140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:34:22.978 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:22 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:22.978 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:22 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:22.979 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:22 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:22.979 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:22 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:22.979 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:22 vm09 ceph-mon[49358]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-09T15:34:22.979 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:22 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:22.979 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:22 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[49764]: pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[54361]: 
pgmap v8: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 21 KiB/s rd, 0 B/s wr, 9 op/s 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:23.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:22 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:23.312 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:23 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:23.120+0000 7fe3e7002140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T15:34:23.721 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:23 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:23.448+0000 7fe3e7002140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:34:23.722 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:23 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:23.490+0000 7fe3e7002140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T15:34:23.722 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:23 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:23.539+0000 7fe3e7002140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:34:23.722 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:23 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:23.628+0000 7fe3e7002140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T15:34:23.722 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:23 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:23.668+0000 7fe3e7002140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:34:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:23.507Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:23.507Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:23.508Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:23.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: 
ts=2026-03-09T15:34:23.508Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:24.063 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:23 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:23.789+0000 7fe3e7002140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:34:24.063 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:23 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:23.918+0000 7fe3e7002140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:34:24.563 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:24.089+0000 7fe3e7002140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T15:34:24.563 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:24.130+0000 7fe3e7002140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:34:24.563 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: [09/Mar/2026:15:34:24] ENGINE Bus STARTING 2026-03-09T15:34:24.563 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: CherryPy Checker: 2026-03-09T15:34:24.563 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: The Application mounted at '' has an empty config. 2026-03-09T15:34:24.563 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:24.563 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: [09/Mar/2026:15:34:24] ENGINE Serving on http://:::9283 2026-03-09T15:34:24.563 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: [09/Mar/2026:15:34:24] ENGINE Bus STARTED 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 systemd[1]: Stopping Ceph mon.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: Upgrade: Setting container_image for all mgr 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["a"]}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: Upgrade: It appears safe to stop mon.a 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: Standby manager 
daemon x restarted 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: Standby manager daemon x started 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/670718033' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/670718033' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/670718033' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.? 192.168.123.109:0/670718033' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: Upgrade: Updating mon.a 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[49764]: Deploying daemon mon.a on vm05 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a[49759]: 2026-03-09T15:34:24.876+0000 7f21933b4700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:34:25.024 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:24 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a[49759]: 2026-03-09T15:34:24.876+0000 7f21933b4700 -1 mon.a@0(leader) e3 *** Got Signal Terminated *** 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:24 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:24] ENGINE Bus STOPPING 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: 
from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: Upgrade: Setting container_image for all mgr 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["a"]}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: Upgrade: It appears safe to stop mon.a 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: Standby manager daemon x restarted 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: Standby manager daemon x started 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.? 
192.168.123.109:0/670718033' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/670718033' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/670718033' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/670718033' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: Upgrade: Updating mon.a 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:25.026 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:24 vm05 ceph-mon[54361]: Deploying daemon mon.a on vm05 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: Upgrade: Setting container_image for all mgr 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 
192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.x"}]': finished 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mgr.y"}]': finished 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["a"]}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: Upgrade: It appears safe to stop mon.a 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: pgmap v9: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: Standby manager daemon x restarted 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: Standby manager daemon x started 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/670718033' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/670718033' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/670718033' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.? 
192.168.123.109:0/670718033' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: Upgrade: Updating mon.a 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:25.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:24 vm09 ceph-mon[49358]: Deploying daemon mon.a on vm05 2026-03-09T15:34:25.295 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 podman[86366]: 2026-03-09 15:34:25.021970069 +0000 UTC m=+0.160168869 container died b31ac3c6697605a46f6e48fe77724f38c3a8fc53ea7c69605ffc8be894abd5a2 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a, io.buildah.version=1.19.8, vendor=Red Hat, Inc., CEPH_POINT_RELEASE=-17.2.0, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_CLEAN=True, architecture=x86_64, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, ceph=True, release=754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, distribution-scope=public, maintainer=Guillaume Abrioux , vcs-type=git, name=centos-stream, version=8, RELEASE=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, io.openshift.tags=base centos centos-stream, io.k8s.display-name=CentOS Stream 8, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_BRANCH=HEAD, com.redhat.component=centos-stream-container) 2026-03-09T15:34:25.295 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 podman[86366]: 2026-03-09 15:34:25.045438992 +0000 UTC m=+0.183637792 container remove b31ac3c6697605a46f6e48fe77724f38c3a8fc53ea7c69605ffc8be894abd5a2 (image=quay.io/ceph/ceph:v17.2.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a, com.redhat.component=centos-stream-container, distribution-scope=public, architecture=x86_64, name=centos-stream, vcs-type=git, GIT_CLEAN=True, CEPH_POINT_RELEASE=-17.2.0, GIT_BRANCH=HEAD, io.k8s.display-name=CentOS Stream 8, io.openshift.expose-services=, RELEASE=HEAD, maintainer=Guillaume Abrioux , summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, build-date=2022-05-03T08:36:31.336870, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, ceph=True, io.openshift.tags=base centos centos-stream, io.buildah.version=1.19.8, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754) 2026-03-09T15:34:25.295 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 bash[86366]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a 2026-03-09T15:34:25.295 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.a.service: Deactivated successfully. 2026-03-09T15:34:25.295 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 systemd[1]: Stopped Ceph mon.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:25.295 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.a.service: Consumed 8.273s CPU time. 2026-03-09T15:34:25.738 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 systemd[1]: Starting Ceph mon.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:34:25.738 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 podman[86484]: 2026-03-09 15:34:25.413367271 +0000 UTC m=+0.020323461 container create 3fa7c78f895272d79b001056788f41b4aabd674b4bb700ec7286817898aee0cd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.vendor=CentOS, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:34:25.738 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 podman[86484]: 2026-03-09 15:34:25.448726695 +0000 UTC m=+0.055682895 container init 3fa7c78f895272d79b001056788f41b4aabd674b4bb700ec7286817898aee0cd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_REF=squid, OSD_FLAVOR=default) 2026-03-09T15:34:25.738 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 podman[86484]: 2026-03-09 15:34:25.452447122 +0000 UTC m=+0.059403312 container start 3fa7c78f895272d79b001056788f41b4aabd674b4bb700ec7286817898aee0cd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, ceph=True, io.buildah.version=1.41.3, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:34:25.738 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 bash[86484]: 3fa7c78f895272d79b001056788f41b4aabd674b4bb700ec7286817898aee0cd 2026-03-09T15:34:25.738 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 podman[86484]: 2026-03-09 15:34:25.404946717 +0000 UTC m=+0.011902917 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:34:25.738 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 systemd[1]: Started 
Ceph mon.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: set uid:gid to 167:167 (ceph:ceph) 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: pidfile_write: ignore empty --pid-file 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: load: jerasure load: lrc 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: RocksDB version: 7.9.2 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Git sha 0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: DB SUMMARY 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: DB Session ID: SPW7BVG6P4XEXN1ZPTKH 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: CURRENT file: CURRENT 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: IDENTITY file: IDENTITY 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: MANIFEST file: MANIFEST-000015 size: 593 Bytes 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: SST files in /var/lib/ceph/mon/ceph-a/store.db dir, Total Num: 1, files: 000024.sst 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-a/store.db: 000022.log size: 774781 ; 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.error_if_exists: 0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.create_if_missing: 0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.paranoid_checks: 1 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.env: 0x556911a35dc0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.fs: PosixFileSystem 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.info_log: 0x55691377a5c0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: 
Options.max_file_opening_threads: 16 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.statistics: (nil) 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.use_fsync: 0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_log_file_size: 0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.keep_log_file_num: 1000 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.recycle_log_file_num: 0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.allow_fallocate: 1 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.allow_mmap_reads: 0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.allow_mmap_writes: 0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.use_direct_reads: 0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.create_missing_column_families: 0 2026-03-09T15:34:25.739 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.db_log_dir: 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.wal_dir: 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.advise_random_on_open: 1 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.db_write_buffer_size: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.write_buffer_manager: 0x55691377f900 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 
ceph-mon[86498]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.rate_limiter: (nil) 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.wal_recovery_mode: 2 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.enable_thread_tracking: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.enable_pipelined_write: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.unordered_write: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.row_cache: None 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.wal_filter: None 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.allow_ingest_behind: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.two_write_queues: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.manual_wal_flush: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.wal_compression: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.atomic_flush: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.log_readahead_size: 0 2026-03-09T15:34:25.740 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.best_efforts_recovery: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.allow_data_in_errors: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.db_host_id: __hostname__ 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_background_jobs: 2 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_background_compactions: -1 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_subcompactions: 1 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_total_wal_size: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_open_files: -1 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.bytes_per_sync: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compaction_readahead_size: 0 2026-03-09T15:34:25.740 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_background_flushes: -1 2026-03-09T15:34:25.740 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Compression algorithms supported: 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: kZSTD supported: 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: kXpressCompression supported: 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: kBZip2Compression supported: 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: kLZ4Compression supported: 1 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: kZlibCompression supported: 1 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: kLZ4HCCompression supported: 1 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: kSnappyCompression supported: 1 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.merge_operator: 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compaction_filter: None 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compaction_filter_factory: None 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.sst_partitioner_factory: None 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55691377a5a0) 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: cache_index_and_filter_blocks: 1 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-09T15:34:25.741 
INFO:journalctl@ceph.mon.a.vm05.stdout: pin_top_level_index_and_filter: 1 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: index_type: 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: data_block_index_type: 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: index_shortening: 1 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: checksum: 4 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: no_block_cache: 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: block_cache: 0x55691379f350 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: block_cache_name: BinnedLRUCache 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: block_cache_options: 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: capacity : 536870912 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: num_shard_bits : 4 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: strict_capacity_limit : 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: high_pri_pool_ratio: 0.000 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: block_cache_compressed: (nil) 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: persistent_cache: (nil) 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: block_size: 4096 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: block_size_deviation: 10 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: block_restart_interval: 16 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: index_block_restart_interval: 1 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: metadata_block_size: 4096 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: partition_filters: 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: use_delta_encoding: 1 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: filter_policy: bloomfilter 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: whole_key_filtering: 1 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: verify_compression: 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: read_amp_bytes_per_bit: 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: format_version: 5 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: enable_index_compression: 1 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: block_align: 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: max_auto_readahead_size: 262144 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: prepopulate_block_cache: 0 2026-03-09T15:34:25.741 INFO:journalctl@ceph.mon.a.vm05.stdout: initial_auto_readahead_size: 8192 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout: num_file_reads_for_auto_readahead: 2 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.write_buffer_size: 33554432 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_write_buffer_number: 2 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compression: NoCompression 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: 
Options.bottommost_compression: Disabled 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.prefix_extractor: nullptr 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.num_levels: 7 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compression_opts.level: 32767 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compression_opts.strategy: 0 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-09T15:34:25.742 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compression_opts.enabled: false 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.target_file_size_base: 67108864 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.arena_block_size: 1048576 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: 
Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.disable_auto_compactions: 0 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-09T15:34:25.742 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.inplace_update_support: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.bloom_locality: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.max_successive_merges: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:25 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:25] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 
9283)) shut down 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:25 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:25] ENGINE Bus STOPPED 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:25 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:25] ENGINE Bus STARTING 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:25 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:25] ENGINE Serving on http://:::9283 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:25 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:25] ENGINE Bus STARTED 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.paranoid_file_checks: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.force_consistency_checks: 1 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.report_bg_io_stats: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.ttl: 2592000 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.enable_blob_files: false 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.min_blob_size: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.blob_file_size: 268435456 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.blob_file_starting_level: 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 24.sst 2026-03-09T15:34:25.743 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-a/store.db/MANIFEST-000015 succeeded,manifest_file_number is 15, next_file_number is 26, last_sequence is 9846, log_number is 22,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 22 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 7f44600a-9590-4b00-b146-56f8708c2222 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773070465487822, "job": 1, "event": "recovery_started", "wal_files": [22]} 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #22 mode 2 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773070465492087, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 27, "file_size": 660932, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 9837, "largest_seqno": 10216, "table_properties": {"data_size": 658045, "index_size": 1340, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 517, "raw_key_size": 5324, "raw_average_key_size": 26, "raw_value_size": 653372, "raw_average_value_size": 3202, "num_data_blocks": 60, "num_entries": 204, "num_filter_entries": 204, "num_deletions": 2, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773070465, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "7f44600a-9590-4b00-b146-56f8708c2222", "db_session_id": "SPW7BVG6P4XEXN1ZPTKH", "orig_file_number": 27, "seqno_to_time_mapping": "N/A"}} 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773070465492145, "job": 1, "event": "recovery_finished"} 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [db/version_set.cc:5047] Creating manifest 29 2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-09T15:34:25.743 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-a/store.db/000022.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x5569137a0e00 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: DB pointer 0x5569138b6000 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: ** DB Stats ** 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: ** Compaction Stats [default] ** 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: L0 1/0 645.44 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 189.0 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: L6 1/0 9.96 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Sum 2/0 10.59 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 189.0 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 189.0 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: ** Compaction Stats [default] ** 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) 
Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 189.0 0.00 0.00 1 0.003 0 0 0.0 0.0 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Flush(GB): cumulative 0.001, interval 0.001 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Cumulative compaction: 0.00 GB write, 44.06 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Interval compaction: 0.00 GB write, 44.06 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Block cache BinnedLRUCache@0x55691379f350#2 capacity: 512.00 MB usage: 37.23 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 2.5e-05 secs_since: 0 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: Block cache entry stats(count,size,portion): FilterBlock(2,9.62 KB,0.00183582%) IndexBlock(2,21.53 KB,0.00410676%) Misc(2,6.08 KB,0.00115931%) 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: starting mon.a rank 0 at public addrs [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] at bind addrs [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon_data /var/lib/ceph/mon/ceph-a fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: mon.a@-1(???) 
e3 preinit fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: mon.a@-1(???).mds e1 new map 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: mon.a@-1(???).mds e1 print_map 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: e1 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: legacy client fscid: -1 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout: No filesystems configured 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: mon.a@-1(???).osd e86 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: mon.a@-1(???).osd e86 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: mon.a@-1(???).osd e86 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T15:34:25.744 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:25 vm05 ceph-mon[86498]: mon.a@-1(???).osd e86 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:27 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ignoring --setuser ceph since I am not root 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:27 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ignoring --setgroup ceph since I am not root 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:27 vm05 ceph-mgr[82800]: -- 192.168.123.105:0/2801560652 <== mon.1 v2:192.168.123.105:3301/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x559096bbb4a0 con 0x559096b99400 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:27 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:27.077+0000 7fef7f04a140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:27 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:27.124+0000 7fef7f04a140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 
ceph-mon[86498]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: mon.a calling monitor election 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: monmap epoch 3 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: last_changed 2026-03-09T15:27:07.289243+0000 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: created 2026-03-09T15:26:23.907274+0000 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: min_mon_release 17 (quincy) 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: election_strategy: 1 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: 0: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.a 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: 1: [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] mon.c 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: 2: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-09T15:34:27.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: fsmap 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: osdmap e86: 8 total, 8 up, 8 in 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: mgrmap e28: y(active, since 15s), standbys: x 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: overall HEALTH_OK 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: from='mgr.25000 ' entity='' 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: Standby manager daemon x restarted 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: Standby manager daemon x started 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[86498]: mgrmap e29: y(active, since 15s), standbys: x 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: from='mgr.25000 
192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: mon.a calling monitor election 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: monmap epoch 3 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: last_changed 2026-03-09T15:27:07.289243+0000 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: created 2026-03-09T15:26:23.907274+0000 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: min_mon_release 17 (quincy) 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: election_strategy: 1 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: 0: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.a 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: 1: [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] mon.c 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: 2: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: fsmap 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: osdmap e86: 8 total, 8 up, 8 in 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: mgrmap e28: y(active, since 15s), standbys: x 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: overall HEALTH_OK 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: from='mgr.25000 ' entity='' 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: Standby manager daemon x restarted 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: Standby manager daemon x started 2026-03-09T15:34:27.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:26 vm05 ceph-mon[54361]: mgrmap e29: y(active, since 15s), standbys: x 2026-03-09T15:34:27.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:26 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:26.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: 
warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:34:27.312 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:26 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: ignoring --setuser ceph since I am not root 2026-03-09T15:34:27.312 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:26 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: ignoring --setgroup ceph since I am not root 2026-03-09T15:34:27.312 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:27 vm09 ceph-mgr[74660]: -- 192.168.123.109:0/3129968317 <== mon.1 v2:192.168.123.105:3301/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x55e6873d54a0 con 0x55e6873b3400 2026-03-09T15:34:27.312 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:27 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:27.068+0000 7f981f9d4140 -1 mgr[py] Module alerts has missing NOTIFY_TYPES member 2026-03-09T15:34:27.312 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:27 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:27.116+0000 7f981f9d4140 -1 mgr[py] Module balancer has missing NOTIFY_TYPES member 2026-03-09T15:34:27.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:34:27.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:34:27.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: from='mgr.25000 192.168.123.105:0/1783450766' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:34:27.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: mon.a calling monitor election 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: pgmap v10: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: monmap epoch 3 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:27.313 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: last_changed 2026-03-09T15:27:07.289243+0000 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: created 2026-03-09T15:26:23.907274+0000 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: min_mon_release 17 (quincy) 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: election_strategy: 1 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: 0: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.a 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: 1: [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] mon.c 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: 2: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: fsmap 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: osdmap e86: 8 total, 8 up, 8 in 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: mgrmap e28: y(active, since 15s), standbys: x 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: overall HEALTH_OK 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: from='mgr.25000 ' entity='' 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: Standby manager daemon x restarted 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: Standby manager daemon x started 2026-03-09T15:34:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:26 vm09 ceph-mon[49358]: mgrmap e29: y(active, since 15s), standbys: x 2026-03-09T15:34:27.924 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:27 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:27.561+0000 7fef7f04a140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:34:27.932 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:27 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:27.571+0000 7f981f9d4140 -1 mgr[py] Module crash has missing NOTIFY_TYPES member 2026-03-09T15:34:28.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:27 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:27.922+0000 7fef7f04a140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T15:34:28.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 
2026-03-09T15:34:28.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 2026-03-09T15:34:28.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: from numpy import show_config as show_numpy_config 2026-03-09T15:34:28.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:28.025+0000 7fef7f04a140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:34:28.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:28.077+0000 7fef7f04a140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:34:28.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:28.155+0000 7fef7f04a140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:34:28.312 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:27 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:27.931+0000 7f981f9d4140 -1 mgr[py] Module devicehealth has missing NOTIFY_TYPES member 2026-03-09T15:34:28.325 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: /lib64/python3.9/site-packages/scipy/__init__.py:73: UserWarning: NumPy was imported from a Python sub-interpreter but NumPy does not properly support sub-interpreters. This will likely work for most users but might cause hard to track down issues or subtle bugs. A common user of the rare sub-interpreter feature is wsgi which also allows single-interpreter mode. 2026-03-09T15:34:28.325 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: Improvements in the case of bugs are welcome, but is not on the NumPy roadmap, and full support may require significant effort to achieve. 
2026-03-09T15:34:28.325 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: from numpy import show_config as show_numpy_config 2026-03-09T15:34:28.325 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:28.033+0000 7f981f9d4140 -1 mgr[py] Module diskprediction_local has missing NOTIFY_TYPES member 2026-03-09T15:34:28.325 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:28.086+0000 7f981f9d4140 -1 mgr[py] Module influx has missing NOTIFY_TYPES member 2026-03-09T15:34:28.325 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:28.164+0000 7f981f9d4140 -1 mgr[py] Module iostat has missing NOTIFY_TYPES member 2026-03-09T15:34:28.984 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:28.727+0000 7fef7f04a140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:34:28.984 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:28.852+0000 7fef7f04a140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:34:28.984 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:28.897+0000 7fef7f04a140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:34:28.984 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:28.937+0000 7fef7f04a140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:34:28.992 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:28.736+0000 7f981f9d4140 -1 mgr[py] Module nfs has missing NOTIFY_TYPES member 2026-03-09T15:34:28.992 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:28.855+0000 7f981f9d4140 -1 mgr[py] Module orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:34:28.992 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:28.899+0000 7f981f9d4140 -1 mgr[py] Module osd_perf_query has missing NOTIFY_TYPES member 2026-03-09T15:34:28.992 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:28.936+0000 7f981f9d4140 -1 mgr[py] Module osd_support has missing NOTIFY_TYPES member 2026-03-09T15:34:28.992 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:28.981+0000 7f981f9d4140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:34:29.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:28 vm05 ceph-mon[86498]: mgrmap e30: y(active, since 16s), standbys: x 2026-03-09T15:34:29.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:28.982+0000 7fef7f04a140 -1 mgr[py] Module pg_autoscaler has missing NOTIFY_TYPES member 2026-03-09T15:34:29.236 
INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:29 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:29.026+0000 7fef7f04a140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:34:29.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:29 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:29.204+0000 7fef7f04a140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:34:29.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:28 vm05 ceph-mon[54361]: mgrmap e30: y(active, since 16s), standbys: x 2026-03-09T15:34:29.271 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:29 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:29.023+0000 7f981f9d4140 -1 mgr[py] Module progress has missing NOTIFY_TYPES member 2026-03-09T15:34:29.271 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:29 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:29.206+0000 7f981f9d4140 -1 mgr[py] Module prometheus has missing NOTIFY_TYPES member 2026-03-09T15:34:29.271 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:28 vm09 ceph-mon[49358]: mgrmap e30: y(active, since 16s), standbys: x 2026-03-09T15:34:29.562 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:29 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:29.269+0000 7f981f9d4140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:34:29.562 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:29 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:29.513+0000 7f981f9d4140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T15:34:29.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:29 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:29.263+0000 7fef7f04a140 -1 mgr[py] Module rbd_support has missing NOTIFY_TYPES member 2026-03-09T15:34:29.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:29 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:29.501+0000 7fef7f04a140 -1 mgr[py] Module rgw has missing NOTIFY_TYPES member 2026-03-09T15:34:30.097 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:29 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:29.809+0000 7fef7f04a140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:34:30.097 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:29 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:29.856+0000 7fef7f04a140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T15:34:30.097 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:29 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:29.898+0000 7fef7f04a140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:34:30.097 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:29 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:29.975+0000 7fef7f04a140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T15:34:30.097 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:30.012+0000 7fef7f04a140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:34:30.127 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:29 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 
2026-03-09T15:34:29.825+0000 7f981f9d4140 -1 mgr[py] Module rook has missing NOTIFY_TYPES member 2026-03-09T15:34:30.127 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:29 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:29.865+0000 7f981f9d4140 -1 mgr[py] Module selftest has missing NOTIFY_TYPES member 2026-03-09T15:34:30.127 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:29 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:29.908+0000 7f981f9d4140 -1 mgr[py] Module snap_schedule has missing NOTIFY_TYPES member 2026-03-09T15:34:30.127 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:29 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:29.997+0000 7f981f9d4140 -1 mgr[py] Module status has missing NOTIFY_TYPES member 2026-03-09T15:34:30.127 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:30.035+0000 7f981f9d4140 -1 mgr[py] Module telegraf has missing NOTIFY_TYPES member 2026-03-09T15:34:30.374 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:30.095+0000 7fef7f04a140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:34:30.374 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:30.224+0000 7fef7f04a140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:34:30.409 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:30.124+0000 7f981f9d4140 -1 mgr[py] Module telemetry has missing NOTIFY_TYPES member 2026-03-09T15:34:30.410 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:30.259+0000 7f981f9d4140 -1 mgr[py] Module test_orchestrator has missing NOTIFY_TYPES member 2026-03-09T15:34:30.636 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:30.372+0000 7fef7f04a140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:30.412+0000 7fef7f04a140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:30] ENGINE Bus STARTING 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: CherryPy Checker: 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: The Application mounted at '' has an empty config. 
2026-03-09T15:34:30.637 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: Active manager daemon y restarted 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: Activating manager daemon y 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: mgrmap e31: y(active, starting, since 0.0147254s), standbys: x 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:34:30.637 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: Manager daemon y is now available 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.? 192.168.123.109:0/2396205758' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: Standby manager daemon x restarted 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: Standby manager daemon x started 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.? 192.168.123.109:0/2396205758' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.? 192.168.123.109:0/2396205758' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[86498]: from='mgr.? 
192.168.123.109:0/2396205758' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: Active manager daemon y restarted 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: Activating manager daemon y 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: mgrmap e31: y(active, starting, since 0.0147254s), standbys: x 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:34:30.637 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:34:30.638 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:34:30.638 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:34:30.638 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:34:30.638 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:34:30.638 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: Manager daemon y is now available 2026-03-09T15:34:30.638 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/2396205758' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:34:30.638 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: Standby manager daemon x restarted 2026-03-09T15:34:30.638 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: Standby manager daemon x started 2026-03-09T15:34:30.638 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/2396205758' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:34:30.638 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/2396205758' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:34:30.638 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:30 vm05 ceph-mon[54361]: from='mgr.? 192.168.123.109:0/2396205758' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:30.408+0000 7f981f9d4140 -1 mgr[py] Module volumes has missing NOTIFY_TYPES member 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:30.458+0000 7f981f9d4140 -1 mgr[py] Module zabbix has missing NOTIFY_TYPES member 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: [09/Mar/2026:15:34:30] ENGINE Bus STARTING 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: CherryPy Checker: 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: The Application mounted at '' has an empty config. 
2026-03-09T15:34:30.664 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: [09/Mar/2026:15:34:30] ENGINE Serving on http://:::9283 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mgr.x.vm09.stdout:Mar 09 15:34:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-x[74656]: [09/Mar/2026:15:34:30] ENGINE Bus STARTED 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: Active manager daemon y restarted 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: Activating manager daemon y 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: mgrmap e31: y(active, starting, since 0.0147254s), standbys: x 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "y", "id": "y"}]: dispatch 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mgr metadata", "who": "x", "id": "x"}]: dispatch 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:34:30.664 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:34:30.665 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 
ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:34:30.665 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:34:30.665 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mds metadata"}]: dispatch 2026-03-09T15:34:30.665 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata"}]: dispatch 2026-03-09T15:34:30.665 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata"}]: dispatch 2026-03-09T15:34:30.665 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: Manager daemon y is now available 2026-03-09T15:34:30.665 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/2396205758' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/crt"}]: dispatch 2026-03-09T15:34:30.665 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: Standby manager daemon x restarted 2026-03-09T15:34:30.665 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: Standby manager daemon x started 2026-03-09T15:34:30.665 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/2396205758' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/crt"}]: dispatch 2026-03-09T15:34:30.665 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.? 192.168.123.109:0/2396205758' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/x/key"}]: dispatch 2026-03-09T15:34:30.665 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:30 vm09 ceph-mon[49358]: from='mgr.? 
192.168.123.109:0/2396205758' entity='mgr.x' cmd=[{"prefix": "config-key get", "key": "mgr/dashboard/key"}]: dispatch 2026-03-09T15:34:30.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:30] ENGINE Serving on http://:::9283 2026-03-09T15:34:30.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:30 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:34:30] ENGINE Bus STARTED 2026-03-09T15:34:31.475 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:31 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:31.453+0000 7fef4b3b6640 -1 mgr.server handle_report got status from non-daemon mon.a 2026-03-09T15:34:31.479 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:31 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:34:31.479 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:31 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:34:31.479 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:31 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:34:31.479 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:31 vm09 ceph-mon[49358]: mgrmap e32: y(active, since 1.01718s), standbys: x 2026-03-09T15:34:31.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:31 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:34:31.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:31 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:34:31.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:31 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:34:31.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:31 vm05 ceph-mon[86498]: mgrmap e32: y(active, since 1.01718s), standbys: x 2026-03-09T15:34:31.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:31 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/mirror_snapshot_schedule"}]: dispatch 2026-03-09T15:34:31.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:31 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:34:31.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:31 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/y/trash_purge_schedule"}]: dispatch 2026-03-09T15:34:31.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:31 vm05 ceph-mon[54361]: mgrmap e32: y(active, since 1.01718s), standbys: x 2026-03-09T15:34:32.918 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:32 vm05 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:34:32] "GET /metrics HTTP/1.1" 200 34987 "" "Prometheus/2.51.0" 2026-03-09T15:34:32.919 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: [09/Mar/2026:15:34:31] ENGINE Bus STARTING 2026-03-09T15:34:32.919 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:34:32.919 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: [09/Mar/2026:15:34:31] ENGINE Bus STARTING 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: [09/Mar/2026:15:34:31] ENGINE Serving on https://192.168.123.105:7150 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: [09/Mar/2026:15:34:31] ENGINE Client ('192.168.123.105', 44962) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: [09/Mar/2026:15:34:31] ENGINE Serving on http://192.168.123.105:8765 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: [09/Mar/2026:15:34:31] ENGINE Bus STARTED 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "who": 
"osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: [09/Mar/2026:15:34:31] ENGINE Bus STARTING 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: pgmap v3: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: [09/Mar/2026:15:34:31] ENGINE Serving on https://192.168.123.105:7150 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: [09/Mar/2026:15:34:31] ENGINE Client ('192.168.123.105', 44962) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: [09/Mar/2026:15:34:31] ENGINE Serving on http://192.168.123.105:8765 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: [09/Mar/2026:15:34:31] ENGINE Bus STARTED 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:32 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:34:33.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: [09/Mar/2026:15:34:31] ENGINE Serving on https://192.168.123.105:7150 2026-03-09T15:34:33.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: [09/Mar/2026:15:34:31] ENGINE Client ('192.168.123.105', 44962) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-03-09T15:34:33.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: [09/Mar/2026:15:34:31] ENGINE Serving on 
http://192.168.123.105:8765 2026-03-09T15:34:33.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: [09/Mar/2026:15:34:31] ENGINE Bus STARTED 2026-03-09T15:34:33.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:32 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm05", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:34:33.922 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:33.509Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:33.922 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:33.509Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:33.922 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:33.510Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:33.923 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:33.510Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will 
retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:33.923 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:33 vm09 ceph-mon[49358]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:33.923 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:33 vm09 ceph-mon[49358]: mgrmap e33: y(active, since 2s), standbys: x 2026-03-09T15:34:33.923 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:33 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.923 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:33 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.923 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:33 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:34:33.923 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:33 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:33.923 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:33 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[86498]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[86498]: mgrmap e33: y(active, since 2s), standbys: x 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[54361]: pgmap v4: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[54361]: mgrmap e33: y(active, since 2s), standbys: x 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' 
entity='mgr.y' 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"}]: dispatch 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:33.926 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:33 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:34.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:34.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:34:35.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:34:35.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:34:35.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:34:35.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:34:35.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' 
entity='mgr.y' 2026-03-09T15:34:35.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 
15:34:34 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:34 vm05 ceph-mon[54361]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:35.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: Updating vm05:/etc/ceph/ceph.conf 2026-03-09T15:34:35.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: Updating vm09:/etc/ceph/ceph.conf 2026-03-09T15:34:35.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: Updating vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:34:35.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: Updating vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/config/ceph.conf 2026-03-09T15:34:35.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' 
entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:35.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:35.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch 2026-03-09T15:34:35.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["c"]}]: dispatch 2026-03-09T15:34:35.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:35.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:35.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:35.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:34 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:35.654 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 systemd[1]: Stopping Ceph mon.c for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:34:35.654 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-c[54357]: 2026-03-09T15:34:35.447+0000 7ff80640f700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.c -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:34:35.655 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-c[54357]: 2026-03-09T15:34:35.447+0000 7ff80640f700 -1 mon.c@1(peon) e3 *** Got Signal Terminated *** 2026-03-09T15:34:35.655 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 podman[88197]: 2026-03-09 15:34:35.564764349 +0000 UTC m=+0.158477430 container died 43bd063ebab208e6329728e3fa19a9389e5240bc1758fb3c9dfe2feb4424ad83 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-c, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., version=8, CEPH_POINT_RELEASE=-17.2.0, io.buildah.version=1.19.8, build-date=2022-05-03T08:36:31.336870, distribution-scope=public, vendor=Red Hat, Inc., summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, name=centos-stream, ceph=True, maintainer=Guillaume Abrioux , io.k8s.display-name=CentOS Stream 8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, architecture=x86_64, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.tags=base centos centos-stream, io.openshift.expose-services=, release=754, com.redhat.component=centos-stream-container, RELEASE=HEAD, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_BRANCH=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_CLEAN=True) 2026-03-09T15:34:35.655 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 podman[88197]: 2026-03-09 15:34:35.586073608 +0000 UTC m=+0.179786689 container remove 43bd063ebab208e6329728e3fa19a9389e5240bc1758fb3c9dfe2feb4424ad83 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-c, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, build-date=2022-05-03T08:36:31.336870, CEPH_POINT_RELEASE=-17.2.0, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, RELEASE=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, vcs-type=git, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, architecture=x86_64, release=754, ceph=True, com.redhat.component=centos-stream-container, vendor=Red Hat, Inc., io.openshift.tags=base centos centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, maintainer=Guillaume Abrioux , name=centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, io.buildah.version=1.19.8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.k8s.display-name=CentOS Stream 8, version=8, io.openshift.expose-services=, GIT_BRANCH=HEAD, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754) 2026-03-09T15:34:35.655 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 bash[88197]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-c 2026-03-09T15:34:35.946 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.c.service: Deactivated successfully. 2026-03-09T15:34:35.946 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 systemd[1]: Stopped Ceph mon.c for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
2026-03-09T15:34:35.946 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.c.service: Consumed 4.394s CPU time. 2026-03-09T15:34:35.946 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 systemd[1]: Starting Ceph mon.c for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:34:36.244 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 podman[88309]: 2026-03-09 15:34:35.944747246 +0000 UTC m=+0.017213254 container create c4256ae4b3f95b9026b3015a74bb0f5ce3df0a56f71ab044715c2534476b0a75 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-c, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default) 2026-03-09T15:34:36.244 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 podman[88309]: 2026-03-09 15:34:35.982070439 +0000 UTC m=+0.054536466 container init c4256ae4b3f95b9026b3015a74bb0f5ce3df0a56f71ab044715c2534476b0a75 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-c, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS) 2026-03-09T15:34:36.244 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 podman[88309]: 2026-03-09 15:34:35.985863 +0000 UTC m=+0.058329017 container start c4256ae4b3f95b9026b3015a74bb0f5ce3df0a56f71ab044715c2534476b0a75 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-c, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, ceph=True, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS) 2026-03-09T15:34:36.244 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 bash[88309]: c4256ae4b3f95b9026b3015a74bb0f5ce3df0a56f71ab044715c2534476b0a75 2026-03-09T15:34:36.245 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 podman[88309]: 2026-03-09 15:34:35.937922343 +0000 UTC m=+0.010388370 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:35 vm05 systemd[1]: Started Ceph mon.c for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: set uid:gid to 167:167 (ceph:ceph) 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: pidfile_write: ignore empty --pid-file 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: load: jerasure load: lrc 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: RocksDB version: 7.9.2 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Git sha 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: DB SUMMARY 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: DB Session ID: OCSQDM6HOAZEEGOAAG05 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: CURRENT file: CURRENT 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: IDENTITY file: IDENTITY 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: MANIFEST file: MANIFEST-000009 size: 517 Bytes 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: SST files in /var/lib/ceph/mon/ceph-c/store.db dir, Total Num: 1, files: 000018.sst 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-c/store.db: 000016.log size: 4338243 ; 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.error_if_exists: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.create_if_missing: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.paranoid_checks: 1 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.env: 0x5649b3a69dc0 
2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.fs: PosixFileSystem 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.info_log: 0x5649b5f2c5c0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_file_opening_threads: 16 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.statistics: (nil) 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.use_fsync: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_log_file_size: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.keep_log_file_num: 1000 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.recycle_log_file_num: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.allow_fallocate: 1 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.allow_mmap_reads: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.allow_mmap_writes: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.use_direct_reads: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.create_missing_column_families: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.db_log_dir: 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.wal_dir: 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.advise_random_on_open: 
1 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.db_write_buffer_size: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.write_buffer_manager: 0x5649b5f31900 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.rate_limiter: (nil) 2026-03-09T15:34:36.245 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.wal_recovery_mode: 2 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.enable_thread_tracking: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.enable_pipelined_write: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.unordered_write: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.row_cache: None 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.wal_filter: None 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.allow_ingest_behind: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.two_write_queues: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.manual_wal_flush: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.wal_compression: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.atomic_flush: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 
vm05 ceph-mon[88323]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.log_readahead_size: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.best_efforts_recovery: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.allow_data_in_errors: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.db_host_id: __hostname__ 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_background_jobs: 2 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_background_compactions: -1 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_subcompactions: 1 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_total_wal_size: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_open_files: -1 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.bytes_per_sync: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: 
Options.strict_bytes_per_sync: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compaction_readahead_size: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_background_flushes: -1 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Compression algorithms supported: 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: kZSTD supported: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: kXpressCompression supported: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: kBZip2Compression supported: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: kLZ4Compression supported: 1 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: kZlibCompression supported: 1 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: kLZ4HCCompression supported: 1 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: kSnappyCompression supported: 1 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000009 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.merge_operator: 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compaction_filter: None 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compaction_filter_factory: None 2026-03-09T15:34:36.246 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.sst_partitioner_factory: None 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5649b5f2c5a0) 
2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: cache_index_and_filter_blocks: 1 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: pin_top_level_index_and_filter: 1 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: index_type: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: data_block_index_type: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: index_shortening: 1 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: checksum: 4 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: no_block_cache: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: block_cache: 0x5649b5f51350 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: block_cache_name: BinnedLRUCache 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: block_cache_options: 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: capacity : 536870912 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: num_shard_bits : 4 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: strict_capacity_limit : 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: high_pri_pool_ratio: 0.000 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: block_cache_compressed: (nil) 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: persistent_cache: (nil) 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: block_size: 4096 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: block_size_deviation: 10 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: block_restart_interval: 16 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: index_block_restart_interval: 1 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: metadata_block_size: 4096 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: partition_filters: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: use_delta_encoding: 1 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: filter_policy: bloomfilter 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: whole_key_filtering: 1 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: verify_compression: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: read_amp_bytes_per_bit: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: format_version: 5 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: enable_index_compression: 1 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: block_align: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: max_auto_readahead_size: 262144 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: prepopulate_block_cache: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: initial_auto_readahead_size: 8192 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout: num_file_reads_for_auto_readahead: 2 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.write_buffer_size: 33554432 2026-03-09T15:34:36.247 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_write_buffer_number: 2 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compression: NoCompression 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.bottommost_compression: Disabled 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.prefix_extractor: nullptr 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.num_levels: 7 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compression_opts.level: 32767 2026-03-09T15:34:36.247 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compression_opts.strategy: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: 
Options.compression_opts.zstd_max_train_bytes: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compression_opts.enabled: false 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.target_file_size_base: 67108864 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-09T15:34:36.248 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.arena_block_size: 1048576 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.disable_auto_compactions: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.inplace_update_support: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.bloom_locality: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: 
Options.max_successive_merges: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.paranoid_file_checks: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.force_consistency_checks: 1 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.report_bg_io_stats: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.ttl: 2592000 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-09T15:34:36.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.enable_blob_files: false 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.min_blob_size: 0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.blob_file_size: 268435456 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.blob_file_starting_level: 0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 18.sst 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-c/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 20, last_sequence is 10428, log_number is 16,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 16 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: bf9b0660-63b6-4b1e-89d6-da8dc500f3f3 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773070476015610, "job": 1, "event": "recovery_started", "wal_files": [16]} 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #16 mode 2 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773070476028116, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 21, "file_size": 2650460, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 10409, "largest_seqno": 11213, "table_properties": {"data_size": 2645680, "index_size": 2717, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 1029, "raw_key_size": 9716, "raw_average_key_size": 24, "raw_value_size": 2636886, "raw_average_value_size": 6778, "num_data_blocks": 126, "num_entries": 389, "num_filter_entries": 389, "num_deletions": 2, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773070476, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "bf9b0660-63b6-4b1e-89d6-da8dc500f3f3", "db_session_id": "OCSQDM6HOAZEEGOAAG05", "orig_file_number": 21, "seqno_to_time_mapping": "N/A"}} 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773070476028432, "job": 1, "event": "recovery_finished"} 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [db/version_set.cc:5047] Creating manifest 23 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-c/store.db/000016.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x5649b5f52e00 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: DB pointer 0x5649b606c000 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: starting mon.c rank 1 at public addrs [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] at bind addrs [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] mon_data /var/lib/ceph/mon/ceph-c fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: ** DB Stats ** 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: ** Compaction Stats [default] ** 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: L0 1/0 2.53 MB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 304.3 0.01 0.00 1 0.008 0 0 0.0 0.0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: L6 1/0 9.96 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Sum 2/0 12.49 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 304.3 0.01 0.00 1 0.008 0 0 0.0 0.0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 304.3 0.01 0.00 1 0.008 0 0 
0.0 0.0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: ** Compaction Stats [default] ** 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 304.3 0.01 0.00 1 0.008 0 0 0.0 0.0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Flush(GB): cumulative 0.002, interval 0.002 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Cumulative compaction: 0.00 GB write, 136.03 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T15:34:36.249 INFO:journalctl@ceph.mon.c.vm05.stdout: Interval compaction: 0.00 GB write, 136.03 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout: Block cache BinnedLRUCache@0x5649b5f51350#2 capacity: 512.00 MB usage: 48.33 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1.2e-05 secs_since: 0 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout: Block cache entry stats(count,size,portion): DataBlock(3,15.23 KB,0.00290573%) FilterBlock(2,10.19 KB,0.00194311%) IndexBlock(2,22.91 KB,0.00436902%) Misc(1,0.00 KB,0%) 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout: 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: mon.c@-1(???) 
e3 preinit fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: mon.c@-1(???).mds e1 new map 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: mon.c@-1(???).mds e1 print_map 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout: e1 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout: legacy client fscid: -1 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout: 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout: No filesystems configured 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: mon.c@-1(???).osd e87 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: mon.c@-1(???).osd e87 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: mon.c@-1(???).osd e87 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: mon.c@-1(???).osd e87 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T15:34:36.250 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:36 vm05 ceph-mon[88323]: mon.c@-1(???).paxosservice(auth 1..21) refresh upgraded, format 0 -> 3 2026-03-09T15:34:37.118 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: mon.c calling monitor election 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: mon.a calling monitor election 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: monmap epoch 3 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: last_changed 2026-03-09T15:27:07.289243+0000 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: created 2026-03-09T15:26:23.907274+0000 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: min_mon_release 17 (quincy) 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: election_strategy: 1 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: 0: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.a 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 
vm05 ceph-mon[86498]: 1: [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] mon.c 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: 2: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: fsmap 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: mgrmap e34: y(active, since 5s), standbys: x 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: overall HEALTH_OK 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:37.119 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[86498]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: mon.c calling monitor election 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: mon.a calling monitor election 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: monmap epoch 3 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: last_changed 2026-03-09T15:27:07.289243+0000 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: created 2026-03-09T15:26:23.907274+0000 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: min_mon_release 17 (quincy) 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: election_strategy: 1 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: 0: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.a 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: 1: [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] mon.c 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: 2: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: fsmap 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: mgrmap e34: y(active, since 5s), standbys: x 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: overall HEALTH_OK 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 
15:34:37 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:37.120 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:37 vm05 ceph-mon[88323]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:37.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: mon.c calling monitor election 2026-03-09T15:34:37.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: mon.a calling monitor election 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: monmap epoch 3 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: last_changed 2026-03-09T15:27:07.289243+0000 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: created 2026-03-09T15:26:23.907274+0000 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: min_mon_release 17 (quincy) 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: election_strategy: 1 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: 0: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.a 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: 1: [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] mon.c 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: 2: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: fsmap 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: mgrmap e34: y(active, since 5s), standbys: x 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: overall HEALTH_OK 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:37.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:37 vm09 ceph-mon[49358]: pgmap v6: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:34:37.313 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: 
CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:34:38.845 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:38 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:38.845 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:38 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:38.845 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:38 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:38.845 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:38 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:39.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:38 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:39.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:38 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:39.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:38 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:39.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:38 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:39.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:38 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:39.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:38 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:39.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:38 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:39.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:38 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[86498]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s 2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y'
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y'
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[88323]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y'
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y'
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y'
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch
2026-03-09T15:34:40.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch
2026-03-09T15:34:40.237 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:39 vm09 ceph-mon[49358]: pgmap v7: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 26 KiB/s rd, 0 B/s wr, 11 op/s
2026-03-09T15:34:40.237 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:39 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y'
2026-03-09T15:34:40.237 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:39 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y'
2026-03-09T15:34:40.237 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:39 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch
2026-03-09T15:34:40.237 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:39 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch
2026-03-09T15:34:40.237 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:39 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y'
2026-03-09T15:34:40.237 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:39 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch
2026-03-09T15:34:40.237 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:39 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T15:34:40.237 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:39 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "quorum_status"}]: dispatch
2026-03-09T15:34:40.237 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:39 vm09 ceph-mon[49358]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon ok-to-stop", "ids": ["b"]}]: dispatch
2026-03-09T15:34:40.534 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:40 vm09 systemd[1]: Stopping Ceph mon.b for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea...
2026-03-09T15:34:40.787 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:40 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-b[49354]: 2026-03-09T15:34:40.603+0000 7f76624d8700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:34:40.787 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:40 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-b[49354]: 2026-03-09T15:34:40.603+0000 7f76624d8700 -1 mon.b@2(peon) e3 *** Got Signal Terminated *** 2026-03-09T15:34:40.787 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:40 vm09 podman[77175]: 2026-03-09 15:34:40.672771848 +0000 UTC m=+0.083901167 container died 0fedbaac50c32657f591f3cb03ef80f65e56e19805d53501b3c0d1ac1d19f2ef (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-b, maintainer=Guillaume Abrioux , GIT_BRANCH=HEAD, io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, CEPH_POINT_RELEASE=-17.2.0, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, architecture=x86_64, name=centos-stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, ceph=True, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_CLEAN=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, vcs-type=git, version=8, distribution-scope=public, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, RELEASE=HEAD, io.buildah.version=1.19.8, release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vendor=Red Hat, Inc., com.redhat.component=centos-stream-container) 2026-03-09T15:34:40.787 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:40 vm09 podman[77175]: 2026-03-09 15:34:40.692968134 +0000 UTC m=+0.104097463 container remove 0fedbaac50c32657f591f3cb03ef80f65e56e19805d53501b3c0d1ac1d19f2ef (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-b, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_REPO=https://github.com/ceph/ceph-container.git, io.k8s.display-name=CentOS Stream 8, CEPH_POINT_RELEASE=-17.2.0, build-date=2022-05-03T08:36:31.336870, ceph=True, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, GIT_CLEAN=True, vendor=Red Hat, Inc., version=8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.expose-services=, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, RELEASE=HEAD, io.openshift.tags=base centos centos-stream, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, GIT_BRANCH=HEAD, io.buildah.version=1.19.8, release=754, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, vcs-type=git, architecture=x86_64, com.redhat.component=centos-stream-container, distribution-scope=public, maintainer=Guillaume Abrioux ) 2026-03-09T15:34:40.787 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:40 vm09 bash[77175]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-b 2026-03-09T15:34:40.787 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:40 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.b.service: Deactivated successfully. 2026-03-09T15:34:40.787 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:40 vm09 systemd[1]: Stopped Ceph mon.b for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:40.787 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:40 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.b.service: Consumed 6.471s CPU time. 2026-03-09T15:34:41.060 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:40 vm09 systemd[1]: Starting Ceph mon.b for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:34:41.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 podman[77283]: 2026-03-09 15:34:41.058867181 +0000 UTC m=+0.017371427 container create 60013cd0d65b34dec5f404979b5fa447448d4fe627f91ff2776a667e88ef54e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-b, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 podman[77283]: 2026-03-09 15:34:41.092994235 +0000 UTC m=+0.051498471 container init 60013cd0d65b34dec5f404979b5fa447448d4fe627f91ff2776a667e88ef54e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-b, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default) 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 podman[77283]: 2026-03-09 15:34:41.096242624 +0000 UTC m=+0.054746861 container start 60013cd0d65b34dec5f404979b5fa447448d4fe627f91ff2776a667e88ef54e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-b, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 bash[77283]: 60013cd0d65b34dec5f404979b5fa447448d4fe627f91ff2776a667e88ef54e5 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 podman[77283]: 2026-03-09 15:34:41.051765152 +0000 UTC m=+0.010269409 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 systemd[1]: Started 
Ceph mon.b for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: set uid:gid to 167:167 (ceph:ceph) 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable), process ceph-mon, pid 2 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: pidfile_write: ignore empty --pid-file 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: load: jerasure load: lrc 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: RocksDB version: 7.9.2 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Git sha 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Compile date 2026-02-25 18:11:04 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: DB SUMMARY 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: DB Session ID: 6VNDOO20DNNBGGI65J43 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: CURRENT file: CURRENT 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: IDENTITY file: IDENTITY 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: MANIFEST file: MANIFEST-000009 size: 704 Bytes 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: SST files in /var/lib/ceph/mon/ceph-b/store.db dir, Total Num: 1, files: 000021.sst 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-b/store.db: 000019.log size: 542455 ; 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.error_if_exists: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.create_if_missing: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.paranoid_checks: 1 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.flush_verify_memtable_count: 1 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.env: 0x5652d4634dc0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.fs: PosixFileSystem 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.info_log: 0x5652d58145c0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: 
Options.max_file_opening_threads: 16 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.statistics: (nil) 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.use_fsync: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_log_file_size: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_manifest_file_size: 1073741824 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.log_file_time_to_roll: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.keep_log_file_num: 1000 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.recycle_log_file_num: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.allow_fallocate: 1 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.allow_mmap_reads: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.allow_mmap_writes: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.use_direct_reads: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.create_missing_column_families: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.db_log_dir: 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.wal_dir: 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.table_cache_numshardbits: 6 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.WAL_ttl_seconds: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.WAL_size_limit_MB: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.manifest_preallocation_size: 4194304 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.is_fd_close_on_exec: 1 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.advise_random_on_open: 1 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.db_write_buffer_size: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.write_buffer_manager: 0x5652d5819900 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 
ceph-mon[77297]: rocksdb: Options.access_hint_on_compaction_start: 1 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.random_access_max_buffer_size: 1048576 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.use_adaptive_mutex: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.rate_limiter: (nil) 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-03-09T15:34:41.314 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.wal_recovery_mode: 2 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.enable_thread_tracking: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.enable_pipelined_write: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.unordered_write: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.allow_concurrent_memtable_write: 1 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.write_thread_max_yield_usec: 100 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.write_thread_slow_yield_usec: 3 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.row_cache: None 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.wal_filter: None 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.avoid_flush_during_recovery: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.allow_ingest_behind: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.two_write_queues: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.manual_wal_flush: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.wal_compression: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.atomic_flush: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.persist_stats_to_disk: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.write_dbid_to_manifest: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.log_readahead_size: 0 2026-03-09T15:34:41.315 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.file_checksum_gen_factory: Unknown 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.best_efforts_recovery: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.allow_data_in_errors: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.db_host_id: __hostname__ 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.enforce_single_del_contracts: true 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_background_jobs: 2 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_background_compactions: -1 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_subcompactions: 1 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.avoid_flush_during_shutdown: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.delayed_write_rate : 16777216 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_total_wal_size: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.stats_dump_period_sec: 600 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.stats_persist_period_sec: 600 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.stats_history_buffer_size: 1048576 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_open_files: -1 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.bytes_per_sync: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.wal_bytes_per_sync: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.strict_bytes_per_sync: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compaction_readahead_size: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_background_flushes: -1 2026-03-09T15:34:41.315 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Compression algorithms supported: 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: kZSTD supported: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: kXpressCompression supported: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: kBZip2Compression supported: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: kZSTDNotFinalCompression supported: 0 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: kLZ4Compression supported: 1 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: kZlibCompression supported: 1 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: kLZ4HCCompression supported: 1 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: kSnappyCompression supported: 1 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Fast CRC32 supported: Supported on x86 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: DMutex implementation: pthread_mutex_t 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000009 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.merge_operator: 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compaction_filter: None 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compaction_filter_factory: None 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.sst_partitioner_factory: None 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.memtable_factory: SkipListFactory 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.table_factory: BlockBasedTable 2026-03-09T15:34:41.315 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x5652d58145a0) 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: cache_index_and_filter_blocks: 1 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: cache_index_and_filter_blocks_with_high_priority: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: pin_l0_filter_and_index_blocks_in_cache: 0 2026-03-09T15:34:41.316 
INFO:journalctl@ceph.mon.b.vm09.stdout: pin_top_level_index_and_filter: 1 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: index_type: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: data_block_index_type: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: index_shortening: 1 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: data_block_hash_table_util_ratio: 0.750000 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: checksum: 4 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: no_block_cache: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache: 0x5652d5839350 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache_name: BinnedLRUCache 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache_options: 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: capacity : 536870912 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: num_shard_bits : 4 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: strict_capacity_limit : 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: high_pri_pool_ratio: 0.000 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: block_cache_compressed: (nil) 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: persistent_cache: (nil) 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: block_size: 4096 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: block_size_deviation: 10 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: block_restart_interval: 16 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: index_block_restart_interval: 1 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: metadata_block_size: 4096 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: partition_filters: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: use_delta_encoding: 1 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: filter_policy: bloomfilter 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: whole_key_filtering: 1 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: verify_compression: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: read_amp_bytes_per_bit: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: format_version: 5 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: enable_index_compression: 1 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: block_align: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: max_auto_readahead_size: 262144 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: prepopulate_block_cache: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: initial_auto_readahead_size: 8192 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout: num_file_reads_for_auto_readahead: 2 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.write_buffer_size: 33554432 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_write_buffer_number: 2 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compression: NoCompression 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: 
Options.bottommost_compression: Disabled 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.prefix_extractor: nullptr 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.num_levels: 7 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.bottommost_compression_opts.level: 32767 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.bottommost_compression_opts.enabled: false 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compression_opts.window_bits: -14 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compression_opts.level: 32767 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compression_opts.strategy: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-03-09T15:34:41.316 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compression_opts.parallel_threads: 1 2026-03-09T15:34:41.317 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compression_opts.enabled: false 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.level0_stop_writes_trigger: 36 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.target_file_size_base: 67108864 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.target_file_size_multiplier: 1 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_bytes_for_level_base: 268435456 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_compaction_bytes: 1677721600 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.arena_block_size: 1048576 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: 
Options.hard_pending_compaction_bytes_limit: 274877906944 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.disable_auto_compactions: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.inplace_update_support: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.inplace_update_num_locks: 10000 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.memtable_whole_key_filtering: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.memtable_huge_page_size: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.bloom_locality: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.max_successive_merges: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.optimize_filters_for_hits: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.paranoid_file_checks: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 
ceph-mon[77297]: rocksdb: Options.force_consistency_checks: 1 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.report_bg_io_stats: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.ttl: 2592000 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.periodic_compaction_seconds: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.preclude_last_level_data_seconds: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.preserve_internal_time_seconds: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.enable_blob_files: false 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.min_blob_size: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.blob_file_size: 268435456 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.blob_compression_type: NoCompression 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.enable_blob_garbage_collection: false 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.blob_compaction_readahead_size: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.blob_file_starting_level: 0 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [table/block_based/block_based_table_reader.cc:721] At least one SST file opened without unique ID to verify: 21.sst 2026-03-09T15:34:41.317 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-b/store.db/MANIFEST-000009 succeeded,manifest_file_number is 9, next_file_number is 23, last_sequence is 11445, log_number is 19,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 19 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: de2130d3-a770-4c9e-b265-9e62d505c903 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773070481123076, "job": 1, "event": "recovery_started", "wal_files": [19]} 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #19 mode 2 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773070481125736, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 24, "file_size": 310160, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 11441, "largest_seqno": 11590, "table_properties": {"data_size": 308241, "index_size": 630, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 261, "raw_key_size": 1970, "raw_average_key_size": 25, "raw_value_size": 306279, "raw_average_value_size": 3926, "num_data_blocks": 28, "num_entries": 78, "num_filter_entries": 78, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1773070481, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "de2130d3-a770-4c9e-b265-9e62d505c903", "db_session_id": "6VNDOO20DNNBGGI65J43", "orig_file_number": 24, "seqno_to_time_mapping": "N/A"}} 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: EVENT_LOG_v1 {"time_micros": 1773070481125796, "job": 1, "event": "recovery_finished"} 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [db/version_set.cc:5047] Creating manifest 26 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [db/version_set.cc:4390] More existing levels in DB than needed. max_bytes_for_level_multiplier may not be guaranteed. 
2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-b/store.db/000019.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x5652d583ae00 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: DB pointer 0x5652d5950000 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: rocksdb: [db/db_impl/db_impl.cc:1111] 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: ** DB Stats ** 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: ** Compaction Stats [default] ** 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: L0 1/0 302.89 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 143.9 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: L6 1/0 11.25 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Sum 2/0 11.55 MB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 143.9 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 143.9 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: ** Compaction Stats [default] ** 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) 
Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 143.9 0.00 0.00 1 0.002 0 0 0.0 0.0 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Uptime(secs): 0.0 total, 0.0 interval 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Flush(GB): cumulative 0.000, interval 0.000 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(GB): cumulative 0.000, interval 0.000 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(Total Files): cumulative 0, interval 0 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(L0 Files): cumulative 0, interval 0 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: AddFile(Keys): cumulative 0, interval 0 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Cumulative compaction: 0.00 GB write, 55.43 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Interval compaction: 0.00 GB write, 55.43 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Block cache BinnedLRUCache@0x5652d5839350#2 capacity: 512.00 MB usage: 30.12 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1e-05 secs_since: 0 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: Block cache entry stats(count,size,portion): FilterBlock(2,9.34 KB,0.00178218%) IndexBlock(2,20.78 KB,0.00396371%) Misc(1,0.00 KB,0%) 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: ** File Read Latency Histogram By Level [default] ** 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: starting mon.b rank 2 at public addrs [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] at bind addrs [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon_data /var/lib/ceph/mon/ceph-b fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: mon.b@-1(???) 
e3 preinit fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: mon.b@-1(???).mds e1 new map 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: mon.b@-1(???).mds e1 print_map 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: e1 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: btime 1970-01-01T00:00:00:000000+0000 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: enable_multiple, ever_enabled_multiple: 1,1 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2} 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: legacy client fscid: -1 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: 2026-03-09T15:34:41.318 INFO:journalctl@ceph.mon.b.vm09.stdout: No filesystems configured 2026-03-09T15:34:41.319 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: mon.b@-1(???).osd e87 crush map has features 3314933000854323200, adjusting msgr requires 2026-03-09T15:34:41.319 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: mon.b@-1(???).osd e87 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T15:34:41.319 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: mon.b@-1(???).osd e87 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T15:34:41.319 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: mon.b@-1(???).osd e87 crush map has features 432629239337189376, adjusting msgr requires 2026-03-09T15:34:41.319 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:41 vm09 ceph-mon[77297]: mon.b@-1(???).paxosservice(auth 1..21) refresh upgraded, format 0 -> 3 2026-03-09T15:34:41.471 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:41 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:41.039+0000 7fef4b3b6640 -1 mgr.server handle_report got status from non-daemon mon.c 2026-03-09T15:34:42.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:42 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:34:42] "GET /metrics HTTP/1.1" 200 34987 "" "Prometheus/2.51.0" 2026-03-09T15:34:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:43.510Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:43.510Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup 
host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:43.510Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:43.510Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:44.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:44 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:44.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:34:46.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:46 vm05 ceph-mon[86498]: Upgrade: It appears safe to stop mon.b 2026-03-09T15:34:46.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:46 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:46.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:46 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:46.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:46 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:46.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:46 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:46.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:46 vm05 ceph-mon[88323]: Upgrade: It appears safe to stop mon.b 2026-03-09T15:34:46.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:46 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:46.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:46 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:46.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:46 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:46.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:46 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:47.280 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:46 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: 
predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:34:47.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:47 vm09 ceph-mon[77297]: Upgrade: It appears safe to stop mon.b 2026-03-09T15:34:47.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:47 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:47.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:47 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:47.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:47 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:47.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:47 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: 1: [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] mon.c 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: 2: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: fsmap 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: mgrmap e34: y(active, since 16s), standbys: x 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: overall HEALTH_OK 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: mon.b calling monitor election 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: mon.c calling monitor election 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: mon.a calling monitor election 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 
2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: monmap epoch 4 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: last_changed 2026-03-09T15:34:47.364398+0000 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: created 2026-03-09T15:26:23.907274+0000 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: min_mon_release 19 (squid) 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: election_strategy: 1 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: 0: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.a 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: 1: [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] mon.c 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: 2: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: fsmap 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: mgrmap e34: y(active, since 16s), standbys: x 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: overall HEALTH_OK 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:48.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:48 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: 1: [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] mon.c 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: 2: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: fsmap 2026-03-09T15:34:48.736 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: mgrmap e34: y(active, since 16s), standbys: x 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: overall HEALTH_OK 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: mon.b calling monitor election 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: mon.c calling monitor election 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: mon.a calling monitor election 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: monmap epoch 4 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: last_changed 2026-03-09T15:34:47.364398+0000 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: created 2026-03-09T15:26:23.907274+0000 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: min_mon_release 19 (squid) 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: election_strategy: 1 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: 0: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.a 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: 1: [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] mon.c 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: 2: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: fsmap 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: mgrmap e34: y(active, since 16s), standbys: x 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: overall HEALTH_OK 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 
2026-03-09T15:34:48.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: 1: [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] mon.c 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: 2: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: fsmap 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: mgrmap e34: y(active, since 16s), standbys: x 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: overall HEALTH_OK 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: mon.b calling monitor election 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: mon.c calling monitor election 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: mon.a calling monitor election 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: mon.a is new leader, mons a,c,b in quorum (ranks 0,1,2) 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: monmap epoch 4 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: last_changed 2026-03-09T15:34:47.364398+0000 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: created 2026-03-09T15:26:23.907274+0000 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: min_mon_release 19 (squid) 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: election_strategy: 1 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: 0: [v2:192.168.123.105:3300/0,v1:192.168.123.105:6789/0] mon.a 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: 1: [v2:192.168.123.105:3301/0,v1:192.168.123.105:6790/0] mon.c 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: 2: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.b 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: fsmap 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: osdmap e87: 8 total, 8 up, 8 in 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: mgrmap e34: y(active, since 16s), standbys: x 
2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: overall HEALTH_OK 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "a"}]: dispatch 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "b"}]: dispatch 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mon metadata", "id": "c"}]: dispatch 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:48.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:48 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:49.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:49 vm09 ceph-mon[77297]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T15:34:49.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:49.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:49.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:49.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:49.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:49.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:49.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:49.563 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[86498]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:49 
vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[88323]: pgmap v12: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 7 op/s 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:49.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: Reconfiguring mon.a (monmap changed)... 
2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: Reconfiguring daemon mon.a on vm05 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: Reconfiguring mgr.y (monmap changed)... 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: Reconfiguring daemon mgr.y on vm05 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: Reconfiguring mon.c (monmap changed)... 
2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: Reconfiguring daemon mon.c on vm05 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: Reconfiguring mon.a (monmap changed)... 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: Reconfiguring daemon mon.a on vm05 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: Reconfiguring mgr.y (monmap changed)... 
2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: Reconfiguring daemon mgr.y on vm05 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: Reconfiguring mon.c (monmap changed)... 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: Reconfiguring daemon mon.c on vm05 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T15:34:50.700 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:50.866 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:34:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: Reconfiguring mon.a (monmap changed)... 
2026-03-09T15:34:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: Reconfiguring daemon mon.a on vm05 2026-03-09T15:34:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: Reconfiguring mgr.y (monmap changed)... 2026-03-09T15:34:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.y", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:34:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: Reconfiguring daemon mgr.y on vm05 2026-03-09T15:34:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: Reconfiguring mon.c (monmap changed)... 
2026-03-09T15:34:51.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:51.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:51.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:51.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: Reconfiguring daemon mon.c on vm05 2026-03-09T15:34:51.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T15:34:51.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:51.372 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:51 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:34:51.137+0000 7fef4b3b6640 -1 mgr.server handle_report got status from non-daemon mon.b 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (56s) 13s ago 6m 22.6M - 0.25.0 c8568f914cd2 93224b6bb99a 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (45s) 3s ago 5m 42.6M - dad864ee21e9 6a58314a043e 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 running (33s) 13s ago 5m 45.8M - 3.5 e1d6a67b021e 3a5f40e66729 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443,9283,8765 running (31s) 3s ago 7m 487M - 19.2.3-678-ge911bdeb 654f31e6858e dd2d7e10f3aa 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:8443,9283,8765 running (44s) 13s ago 8m 540M - 19.2.3-678-ge911bdeb 654f31e6858e db0211ba824d 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (25s) 13s ago 8m 45.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3fa7c78f8952 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (10s) 3s ago 7m 21.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 60013cd0d65b 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (15s) 13s ago 7m 23.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c4256ae4b3f9 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (61s) 13s ago 6m 9277k - 1.7.0 72c9c2088986 e730a028339f 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (52s) 3s ago 6m 9202k - 
1.7.0 72c9c2088986 a360ac0679f4 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (7m) 13s ago 7m 54.6M 4096M 17.2.0 e1d6a67b021e b143b061d0dd 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (7m) 13s ago 7m 53.1M 4096M 17.2.0 e1d6a67b021e 2277528e9f90 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (7m) 13s ago 7m 49.4M 4096M 17.2.0 e1d6a67b021e 21b53f2cd34c 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (7m) 13s ago 7m 54.4M 4096M 17.2.0 e1d6a67b021e b4398847e195 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (6m) 3s ago 6m 52.4M 4096M 17.2.0 e1d6a67b021e 00685022776e 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (6m) 3s ago 6m 50.4M 4096M 17.2.0 e1d6a67b021e fbdec571623e 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (6m) 3s ago 6m 51.1M 4096M 17.2.0 e1d6a67b021e ad01856a3458 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (6m) 3s ago 6m 51.4M 4096M 17.2.0 e1d6a67b021e 3cc4727e5f07 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (32s) 3s ago 6m 42.6M - 2.51.0 1d3b7f56885b 737f11649a72 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (5m) 13s ago 5m 92.3M - 17.2.0 e1d6a67b021e 1d93c894d675 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (5m) 3s ago 5m 91.7M - 17.2.0 e1d6a67b021e 5c4755297ad0 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (5m) 13s ago 5m 93.4M - 17.2.0 e1d6a67b021e fe5812c6c74c 2026-03-09T15:34:51.394 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (5m) 3s ago 5m 90.7M - 17.2.0 e1d6a67b021e 7a967179b651 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: "mon": { 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: "mgr": { 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: "osd": { 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": { 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: "overall": { 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 12, 2026-03-09T15:34:51.698 
INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 5 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:34:51.698 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:34:51.970 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:34:51.970 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T15:34:51.970 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true, 2026-03-09T15:34:51.970 INFO:teuthology.orchestra.run.vm05.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-09T15:34:51.971 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [ 2026-03-09T15:34:51.971 INFO:teuthology.orchestra.run.vm05.stdout: "mgr", 2026-03-09T15:34:51.971 INFO:teuthology.orchestra.run.vm05.stdout: "mon" 2026-03-09T15:34:51.971 INFO:teuthology.orchestra.run.vm05.stdout: ], 2026-03-09T15:34:51.971 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "5/23 daemons upgraded", 2026-03-09T15:34:51.971 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Currently upgrading mon daemons", 2026-03-09T15:34:51.971 INFO:teuthology.orchestra.run.vm05.stdout: "is_paused": false 2026-03-09T15:34:51.971 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: Reconfiguring osd.0 (monmap changed)... 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: Reconfiguring daemon osd.0 on vm05 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: Reconfiguring osd.1 (monmap changed)... 
2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: Reconfiguring daemon osd.1 on vm05 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='client.34125 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/4237566740' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: Reconfiguring osd.0 (monmap changed)... 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: Reconfiguring daemon osd.0 on vm05 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: Reconfiguring osd.1 (monmap changed)... 
2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: Reconfiguring daemon osd.1 on vm05 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='client.34125 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:51.971 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:51 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/4237566740' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: Reconfiguring osd.0 (monmap changed)... 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: Reconfiguring daemon osd.0 on vm05 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: pgmap v13: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: Reconfiguring osd.1 (monmap changed)... 
2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: Reconfiguring daemon osd.1 on vm05 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='client.34125 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:51 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/4237566740' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:52.295 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_OK 2026-03-09T15:34:52.863 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:34:52 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:34:52] "GET /metrics HTTP/1.1" 200 37799 "" "Prometheus/2.51.0" 2026-03-09T15:34:53.016 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='client.34131 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:53.016 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: Reconfiguring osd.2 (monmap changed)... 
2026-03-09T15:34:53.016 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: Reconfiguring daemon osd.2 on vm05 2026-03-09T15:34:53.016 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='client.44101 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: Reconfiguring osd.3 (monmap changed)... 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: Reconfiguring daemon osd.3 on vm05 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: Reconfiguring rgw.foo.vm05.tiuqos (monmap changed)... 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: Reconfiguring daemon rgw.foo.vm05.tiuqos on vm05 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='client.34146 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='client.? 
192.168.123.105:0/855574823' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:34:53.017 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='client.34131 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: Reconfiguring osd.2 (monmap changed)... 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: Reconfiguring daemon osd.2 on vm05 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='client.44101 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: Reconfiguring osd.3 (monmap changed)... 
2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: Reconfiguring daemon osd.3 on vm05 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: Reconfiguring rgw.foo.vm05.tiuqos (monmap changed)... 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: Reconfiguring daemon rgw.foo.vm05.tiuqos on vm05 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='client.34146 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='client.? 
192.168.123.105:0/855574823' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='client.34131 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: Reconfiguring osd.2 (monmap changed)... 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: Reconfiguring daemon osd.2 on vm05 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='client.44101 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: Reconfiguring osd.3 (monmap changed)... 
2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: Reconfiguring daemon osd.3 on vm05 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: Reconfiguring rgw.foo.vm05.tiuqos (monmap changed)... 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: Reconfiguring daemon rgw.foo.vm05.tiuqos on vm05 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='client.34146 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='client.? 
192.168.123.105:0/855574823' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "mon."}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config get", "who": "mon", "key": "public_network"}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "mgr.x", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "mgr services"}]: dispatch 2026-03-09T15:34:53.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:53.865 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:53.510Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:53.866 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:53.510Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:53.866 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:53.511Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:53.866 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:34:53.511Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: Reconfiguring rgw.smpl.vm05.grnlph (monmap changed)... 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: Reconfiguring daemon rgw.smpl.vm05.grnlph on vm05 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: Reconfiguring mon.b (monmap changed)... 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: Reconfiguring daemon mon.b on vm09 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: Reconfiguring mgr.x (monmap changed)... 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: Reconfiguring daemon mgr.x on vm09 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T15:34:53.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:53 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: Reconfiguring rgw.smpl.vm05.grnlph (monmap changed)... 
2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: Reconfiguring daemon rgw.smpl.vm05.grnlph on vm05 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: Reconfiguring mon.b (monmap changed)... 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: Reconfiguring daemon mon.b on vm09 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: Reconfiguring mgr.x (monmap changed)... 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: Reconfiguring daemon mgr.x on vm09 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: Reconfiguring rgw.smpl.vm05.grnlph (monmap changed)... 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: Reconfiguring daemon rgw.smpl.vm05.grnlph on vm05 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: pgmap v14: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: Reconfiguring mon.b (monmap changed)... 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: Reconfiguring daemon mon.b on vm09 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: Reconfiguring mgr.x (monmap changed)... 
2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: Reconfiguring daemon mgr.x on vm09 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:54.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:54.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T15:34:54.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:53 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:54.311 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:54 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: Reconfiguring osd.4 (monmap changed)... 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: Reconfiguring daemon osd.4 on vm09 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: Reconfiguring osd.5 (monmap changed)... 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: Reconfiguring daemon osd.5 on vm09 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:54.980 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 
2026-03-09T15:34:54.981 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:34:54.981 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:54 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: Reconfiguring osd.4 (monmap changed)... 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: Reconfiguring daemon osd.4 on vm09 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: Reconfiguring osd.5 (monmap changed)... 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: Reconfiguring daemon osd.5 on vm09 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:34:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:55.236 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: Reconfiguring osd.4 (monmap changed)... 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: Reconfiguring daemon osd.4 on vm09 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: Reconfiguring osd.5 (monmap changed)... 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: Reconfiguring daemon osd.5 on vm09 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:34:55.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:54 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: Reconfiguring osd.6 (monmap changed)... 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: Reconfiguring daemon osd.6 on vm09 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: Reconfiguring osd.7 (monmap changed)... 
2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: Reconfiguring daemon osd.7 on vm09 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: Reconfiguring rgw.foo.vm09.aljafu (monmap changed)... 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: Reconfiguring daemon rgw.foo.vm09.aljafu on vm09 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": 
"config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-09T15:34:55.988 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: Reconfiguring osd.6 (monmap changed)... 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: Reconfiguring daemon osd.6 on vm09 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: Reconfiguring osd.7 (monmap changed)... 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: Reconfiguring daemon osd.7 on vm09 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: Reconfiguring rgw.foo.vm09.aljafu (monmap changed)... 
2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: Reconfiguring daemon rgw.foo.vm09.aljafu on vm09 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config 
rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T15:34:55.989 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:55 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:56.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: Reconfiguring osd.6 (monmap changed)... 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: Reconfiguring daemon osd.6 on vm09 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: Reconfiguring osd.7 (monmap changed)... 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: pgmap v15: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: Reconfiguring daemon osd.7 on vm09 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: Reconfiguring rgw.foo.vm09.aljafu (monmap changed)... 
2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: Reconfiguring daemon rgw.foo.vm09.aljafu on vm09 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]: dispatch 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.a"}]': finished 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]: dispatch 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "mon.b"}]': finished 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "mon.c"}]: dispatch 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config 
rm", "name": "container_image", "who": "mon.c"}]': finished 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.0"}]: dispatch 2026-03-09T15:34:56.064 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:55 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:34:56.542 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:56 vm05 systemd[1]: Stopping Ceph osd.0 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:34:56.883 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:56 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0[57406]: 2026-03-09T15:34:56.619+0000 7f440ebed700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:34:56.883 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:56 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0[57406]: 2026-03-09T15:34:56.619+0000 7f440ebed700 -1 osd.0 87 *** Got signal Terminated *** 2026-03-09T15:34:56.883 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:56 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0[57406]: 2026-03-09T15:34:56.619+0000 7f440ebed700 -1 osd.0 87 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[86498]: Reconfiguring rgw.smpl.vm09.mkjxeh (monmap changed)... 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[86498]: Reconfiguring daemon rgw.smpl.vm09.mkjxeh on vm09 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[86498]: Upgrade: Setting container_image for all mon 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[86498]: Upgrade: Setting container_image for all crash 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[86498]: Upgrade: osd.0 is safe to restart 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[86498]: Upgrade: Updating osd.0 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[86498]: Deploying daemon osd.0 on vm05 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[86498]: osd.0 marked itself down and dead 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[88323]: Reconfiguring rgw.smpl.vm09.mkjxeh (monmap changed)... 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[88323]: Reconfiguring daemon rgw.smpl.vm09.mkjxeh on vm09 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[88323]: Upgrade: Setting container_image for all mon 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[88323]: Upgrade: Setting container_image for all crash 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[88323]: Upgrade: osd.0 is safe to restart 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[88323]: Upgrade: Updating osd.0 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[88323]: Deploying daemon osd.0 on vm05 2026-03-09T15:34:57.148 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:56 vm05 ceph-mon[88323]: osd.0 marked itself down and dead 2026-03-09T15:34:57.149 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:56 vm05 podman[90400]: 2026-03-09 15:34:56.934528475 +0000 UTC m=+0.329748188 container died b143b061d0dde4f29370a386a26efddfc24a33a6f3e92a3734cdd427b4704084 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0, GIT_CLEAN=True, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0, version=8, GIT_BRANCH=HEAD, architecture=x86_64, ceph=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.expose-services=, distribution-scope=public, vcs-type=git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.buildah.version=1.19.8, name=centos-stream, RELEASE=HEAD, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.component=centos-stream-container, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. 
This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, release=754) 2026-03-09T15:34:57.149 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:56 vm05 podman[90400]: 2026-03-09 15:34:56.966856771 +0000 UTC m=+0.362076474 container remove b143b061d0dde4f29370a386a26efddfc24a33a6f3e92a3734cdd427b4704084 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0, distribution-scope=public, io.k8s.display-name=CentOS Stream 8, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, ceph=True, io.buildah.version=1.19.8, GIT_REPO=https://github.com/ceph/ceph-container.git, vendor=Red Hat, Inc., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, GIT_CLEAN=True, RELEASE=HEAD, maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0, io.openshift.tags=base centos centos-stream, release=754, version=8, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., GIT_BRANCH=HEAD, build-date=2022-05-03T08:36:31.336870, vcs-type=git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, name=centos-stream, io.openshift.expose-services=, architecture=x86_64) 2026-03-09T15:34:57.149 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:56 vm05 bash[90400]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0 2026-03-09T15:34:57.149 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 podman[90468]: 2026-03-09 15:34:57.125014225 +0000 UTC m=+0.017817773 container create f8a0261d125e35ae5a9c31d6c18cd610f611915fd758e68829e56d2998f59754 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0) 2026-03-09T15:34:57.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:34:56 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:34:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:34:57.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:56 vm09 ceph-mon[77297]: Reconfiguring rgw.smpl.vm09.mkjxeh (monmap changed)... 
2026-03-09T15:34:57.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:56 vm09 ceph-mon[77297]: Reconfiguring daemon rgw.smpl.vm09.mkjxeh on vm09 2026-03-09T15:34:57.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:56 vm09 ceph-mon[77297]: Upgrade: Setting container_image for all mon 2026-03-09T15:34:57.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:56 vm09 ceph-mon[77297]: Upgrade: Setting container_image for all crash 2026-03-09T15:34:57.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:56 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["0"], "max": 16}]: dispatch 2026-03-09T15:34:57.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:56 vm09 ceph-mon[77297]: Upgrade: osd.0 is safe to restart 2026-03-09T15:34:57.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:56 vm09 ceph-mon[77297]: Upgrade: Updating osd.0 2026-03-09T15:34:57.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:56 vm09 ceph-mon[77297]: Deploying daemon osd.0 on vm05 2026-03-09T15:34:57.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:56 vm09 ceph-mon[77297]: osd.0 marked itself down and dead 2026-03-09T15:34:57.422 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 podman[90468]: 2026-03-09 15:34:57.18304844 +0000 UTC m=+0.075851988 container init f8a0261d125e35ae5a9c31d6c18cd610f611915fd758e68829e56d2998f59754 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid) 2026-03-09T15:34:57.422 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 podman[90468]: 2026-03-09 15:34:57.186182693 +0000 UTC m=+0.078986241 container start f8a0261d125e35ae5a9c31d6c18cd610f611915fd758e68829e56d2998f59754 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , ceph=True) 2026-03-09T15:34:57.422 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 podman[90468]: 2026-03-09 15:34:57.191742524 +0000 UTC m=+0.084546082 container attach f8a0261d125e35ae5a9c31d6c18cd610f611915fd758e68829e56d2998f59754 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-deactivate, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3) 2026-03-09T15:34:57.422 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 podman[90468]: 2026-03-09 15:34:57.117841958 +0000 UTC m=+0.010645506 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:34:57.422 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 podman[90468]: 2026-03-09 15:34:57.325190854 +0000 UTC m=+0.217994402 container died f8a0261d125e35ae5a9c31d6c18cd610f611915fd758e68829e56d2998f59754 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-deactivate, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid) 2026-03-09T15:34:57.422 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 podman[90468]: 2026-03-09 15:34:57.345146255 +0000 UTC m=+0.237949803 container remove f8a0261d125e35ae5a9c31d6c18cd610f611915fd758e68829e56d2998f59754 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:34:57.422 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.0.service: Deactivated successfully. 2026-03-09T15:34:57.422 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 systemd[1]: Stopped Ceph osd.0 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:34:57.422 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.0.service: Consumed 4.703s CPU time. 
2026-03-09T15:34:57.921 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 systemd[1]: Starting Ceph osd.0 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:34:57.921 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 podman[90571]: 2026-03-09 15:34:57.647583923 +0000 UTC m=+0.016253978 container create ecd1284da70ca832ea06d05db7534ffe692a0ca8a2a24674b8bffae5116df9cc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20260223) 2026-03-09T15:34:57.921 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 podman[90571]: 2026-03-09 15:34:57.683341163 +0000 UTC m=+0.052011209 container init ecd1284da70ca832ea06d05db7534ffe692a0ca8a2a24674b8bffae5116df9cc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2) 2026-03-09T15:34:57.921 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 podman[90571]: 2026-03-09 15:34:57.685925368 +0000 UTC m=+0.054595423 container start ecd1284da70ca832ea06d05db7534ffe692a0ca8a2a24674b8bffae5116df9cc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, CEPH_REF=squid, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:34:57.921 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 podman[90571]: 2026-03-09 15:34:57.68679752 +0000 UTC m=+0.055467575 container attach ecd1284da70ca832ea06d05db7534ffe692a0ca8a2a24674b8bffae5116df9cc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate, 
org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T15:34:57.921 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 podman[90571]: 2026-03-09 15:34:57.641095124 +0000 UTC m=+0.009765179 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:34:57.921 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate[90583]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:34:57.921 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 bash[90571]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:34:57.921 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate[90583]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:34:57.921 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:57 vm05 bash[90571]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:34:58.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:57 vm05 ceph-mon[86498]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:34:58.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:57 vm05 ceph-mon[86498]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:34:58.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:57 vm05 ceph-mon[86498]: osdmap e88: 8 total, 7 up, 8 in 2026-03-09T15:34:58.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:57 vm05 ceph-mon[88323]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:34:58.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:57 vm05 ceph-mon[88323]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:34:58.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:57 vm05 ceph-mon[88323]: osdmap e88: 8 total, 7 up, 8 in 2026-03-09T15:34:58.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:57 vm09 ceph-mon[77297]: pgmap v16: 161 pgs: 161 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:34:58.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:57 vm09 ceph-mon[77297]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:34:58.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:57 vm09 ceph-mon[77297]: osdmap e88: 8 total, 7 up, 8 in 2026-03-09T15:34:58.683 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate[90583]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:34:58.683 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 bash[90571]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:34:58.683 
INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate[90583]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:34:58.683 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 bash[90571]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:34:58.683 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate[90583]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:34:58.683 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 bash[90571]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:34:58.683 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate[90583]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T15:34:58.683 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 bash[90571]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T15:34:58.683 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate[90583]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-adef8b2b-6956-4673-9290-4e514b0cdf69/osd-block-04a1f096-2671-4227-83a4-258146ba498d --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-09T15:34:58.683 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 bash[90571]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-adef8b2b-6956-4673-9290-4e514b0cdf69/osd-block-04a1f096-2671-4227-83a4-258146ba498d --path /var/lib/ceph/osd/ceph-0 --no-mon-config 2026-03-09T15:34:58.683 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate[90583]: Running command: /usr/bin/ln -snf /dev/ceph-adef8b2b-6956-4673-9290-4e514b0cdf69/osd-block-04a1f096-2671-4227-83a4-258146ba498d /var/lib/ceph/osd/ceph-0/block 2026-03-09T15:34:58.684 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 bash[90571]: Running command: /usr/bin/ln -snf /dev/ceph-adef8b2b-6956-4673-9290-4e514b0cdf69/osd-block-04a1f096-2671-4227-83a4-258146ba498d /var/lib/ceph/osd/ceph-0/block 2026-03-09T15:34:58.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:58 vm05 ceph-mon[86498]: osdmap e89: 8 total, 7 up, 8 in 2026-03-09T15:34:58.965 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:58 vm05 ceph-mon[88323]: osdmap e89: 8 total, 7 up, 8 in 2026-03-09T15:34:58.965 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate[90583]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-09T15:34:58.965 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 bash[90571]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block 2026-03-09T15:34:58.965 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate[90583]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T15:34:58.965 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 bash[90571]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T15:34:58.965 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate[90583]: Running command: /usr/bin/chown -R ceph:ceph 
/var/lib/ceph/osd/ceph-0 2026-03-09T15:34:58.965 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 bash[90571]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0 2026-03-09T15:34:58.965 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate[90583]: --> ceph-volume lvm activate successful for osd ID: 0 2026-03-09T15:34:58.965 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 bash[90571]: --> ceph-volume lvm activate successful for osd ID: 0 2026-03-09T15:34:58.965 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 podman[90571]: 2026-03-09 15:34:58.712729704 +0000 UTC m=+1.081399759 container died ecd1284da70ca832ea06d05db7534ffe692a0ca8a2a24674b8bffae5116df9cc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, ceph=True) 2026-03-09T15:34:58.965 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 podman[90571]: 2026-03-09 15:34:58.842008787 +0000 UTC m=+1.210678851 container remove ecd1284da70ca832ea06d05db7534ffe692a0ca8a2a24674b8bffae5116df9cc (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-activate, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T15:34:59.237 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:58 vm05 podman[90819]: 2026-03-09 15:34:58.963188957 +0000 UTC m=+0.017320312 container create 343a65bb3f011bc60ffefb2c065da72d55ab4034c2e52b9d8c0a7686e6197268 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, 
io.buildah.version=1.41.3) 2026-03-09T15:34:59.237 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:59 vm05 podman[90819]: 2026-03-09 15:34:59.003580778 +0000 UTC m=+0.057712133 container init 343a65bb3f011bc60ffefb2c065da72d55ab4034c2e52b9d8c0a7686e6197268 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default) 2026-03-09T15:34:59.237 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:59 vm05 podman[90819]: 2026-03-09 15:34:59.006935153 +0000 UTC m=+0.061066508 container start 343a65bb3f011bc60ffefb2c065da72d55ab4034c2e52b9d8c0a7686e6197268 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0, ceph=True, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:34:59.237 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:59 vm05 bash[90819]: 343a65bb3f011bc60ffefb2c065da72d55ab4034c2e52b9d8c0a7686e6197268 2026-03-09T15:34:59.237 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:59 vm05 podman[90819]: 2026-03-09 15:34:58.955148836 +0000 UTC m=+0.009280201 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:34:59.237 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:59 vm05 systemd[1]: Started Ceph osd.0 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
2026-03-09T15:34:59.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:58 vm09 ceph-mon[77297]: osdmap e89: 8 total, 7 up, 8 in 2026-03-09T15:34:59.719 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:34:59 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0[90829]: 2026-03-09T15:34:59.352+0000 7f3dcf860740 -1 Falling back to public interface 2026-03-09T15:34:59.978 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:59 vm05 ceph-mon[86498]: pgmap v19: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:59.978 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:59 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:59.978 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:34:59 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:59.978 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:59 vm05 ceph-mon[88323]: pgmap v19: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:34:59.978 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:59 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:34:59.978 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:34:59 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:00.235 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:35:00 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0[90829]: 2026-03-09T15:35:00.211+0000 7f3dcf860740 -1 osd.0 0 read_superblock omap replica is missing. 2026-03-09T15:35:00.235 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:35:00 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0[90829]: 2026-03-09T15:35:00.233+0000 7f3dcf860740 -1 osd.0 87 log_to_monitors true 2026-03-09T15:35:00.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:59 vm09 ceph-mon[77297]: pgmap v19: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:35:00.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:59 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:00.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:34:59 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[86498]: from='osd.0 [v2:192.168.123.105:6802/2256159024,v1:192.168.123.105:6803/2256159024]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T15:35:01.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[86498]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T15:35:01.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:00 vm05 
ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:35:01.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[88323]: from='osd.0 [v2:192.168.123.105:6802/2256159024,v1:192.168.123.105:6803/2256159024]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T15:35:01.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[88323]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T15:35:01.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:35:01.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:00 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:00 vm09 ceph-mon[77297]: from='osd.0 [v2:192.168.123.105:6802/2256159024,v1:192.168.123.105:6803/2256159024]' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T15:35:01.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:00 vm09 ceph-mon[77297]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]: dispatch 2026-03-09T15:35:01.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:00 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:00 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:00 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:00 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:35:01.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:00 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:01.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:00 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 
2026-03-09T15:35:02.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:01 vm05 ceph-mon[86498]: pgmap v20: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T15:35:02.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:01 vm05 ceph-mon[86498]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-09T15:35:02.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:01 vm05 ceph-mon[86498]: osdmap e90: 8 total, 7 up, 8 in 2026-03-09T15:35:02.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:01 vm05 ceph-mon[86498]: from='osd.0 [v2:192.168.123.105:6802/2256159024,v1:192.168.123.105:6803/2256159024]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:02.237 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:01 vm05 ceph-mon[86498]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:02.240 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:01 vm05 ceph-mon[88323]: pgmap v20: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T15:35:02.240 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:01 vm05 ceph-mon[88323]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-09T15:35:02.240 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:01 vm05 ceph-mon[88323]: osdmap e90: 8 total, 7 up, 8 in 2026-03-09T15:35:02.240 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:01 vm05 ceph-mon[88323]: from='osd.0 [v2:192.168.123.105:6802/2256159024,v1:192.168.123.105:6803/2256159024]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:02.240 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:01 vm05 ceph-mon[88323]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:02.240 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:35:02 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0[90829]: 2026-03-09T15:35:02.071+0000 7f3dc6e0a640 -1 osd.0 87 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:35:02.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:01 vm09 ceph-mon[77297]: pgmap v20: 161 pgs: 21 stale+active+clean, 140 active+clean; 457 KiB data, 100 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T15:35:02.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:01 vm09 ceph-mon[77297]: from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["0"]}]': finished 2026-03-09T15:35:02.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:01 vm09 ceph-mon[77297]: osdmap e90: 8 total, 7 up, 8 in 2026-03-09T15:35:02.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:01 vm09 ceph-mon[77297]: from='osd.0 [v2:192.168.123.105:6802/2256159024,v1:192.168.123.105:6803/2256159024]' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:02.313 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:01 vm09 ceph-mon[77297]: from='osd.0 ' entity='osd.0' cmd=[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:02.738 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:35:02 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:35:02] "GET /metrics HTTP/1.1" 200 37873 "" "Prometheus/2.51.0" 2026-03-09T15:35:02.738 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:35:02 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:35:02.656+0000 7fef4b3b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 
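[annotation, not part of the captured log] The mgr reply above, "(16) Device or resource busy unsafe to stop osd(s) at this time (20 PGs are or would become offline)", is cephadm's upgrade gate: before restarting each OSD it asks the mons "osd ok-to-stop" and backs off while data would go offline. A minimal sketch of running the same check by hand, assuming a standard ceph CLI; osd.1 is the daemon being gated in the surrounding entries:

    # fails with EBUSY (16), matching the mgr reply above, while stopping osd.1 would take PGs offline
    ceph osd ok-to-stop 1
    # rough view of why: which PGs are still undersized/degraded
    ceph pg dump pgs_brief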
2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:03.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:02 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T15:35:03.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:02 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:03.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:02 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:03.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:02 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:03.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:02 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:03.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:02 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:35:03.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:02 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:03.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:02 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:35:03.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:02 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:03.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:02 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:03.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:02 vm09 ceph-mon[77297]: 
from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:03.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:02 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T15:35:03.978 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:03.511Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:03.978 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:03.511Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:03.978 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:03.512Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:03.978 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:03.512Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:04.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[86498]: OSD bench result of 15448.778519 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T15:35:04.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[86498]: pgmap v22: 161 pgs: 37 active+undersized, 21 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 681 B/s rd, 0 op/s; 67/627 objects degraded (10.686%) 2026-03-09T15:35:04.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T15:35:04.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[86498]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T15:35:04.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[86498]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:35:04.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[86498]: Cluster is now healthy 2026-03-09T15:35:04.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[86498]: Health check failed: Degraded data redundancy: 67/627 objects degraded (10.686%), 21 pgs degraded (PG_DEGRADED) 2026-03-09T15:35:04.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[86498]: osd.0 [v2:192.168.123.105:6802/2256159024,v1:192.168.123.105:6803/2256159024] boot 2026-03-09T15:35:04.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[86498]: osdmap e91: 8 total, 8 up, 8 in 2026-03-09T15:35:04.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:35:04.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[88323]: OSD bench result of 15448.778519 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T15:35:04.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[88323]: pgmap v22: 161 pgs: 37 active+undersized, 21 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 681 B/s rd, 0 op/s; 67/627 objects degraded (10.686%) 2026-03-09T15:35:04.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T15:35:04.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[88323]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T15:35:04.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[88323]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:35:04.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[88323]: Cluster is now healthy 2026-03-09T15:35:04.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[88323]: Health check failed: Degraded data redundancy: 67/627 objects degraded (10.686%), 21 pgs degraded (PG_DEGRADED) 2026-03-09T15:35:04.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[88323]: osd.0 [v2:192.168.123.105:6802/2256159024,v1:192.168.123.105:6803/2256159024] boot 2026-03-09T15:35:04.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[88323]: osdmap e91: 8 total, 8 up, 8 in 2026-03-09T15:35:04.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:03 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:35:04.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:04.147Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-09T15:35:04.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:04.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. 
This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:35:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:03 vm09 ceph-mon[77297]: OSD bench result of 15448.778519 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.0. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T15:35:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:03 vm09 ceph-mon[77297]: pgmap v22: 161 pgs: 37 active+undersized, 21 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 681 B/s rd, 0 op/s; 67/627 objects degraded (10.686%) 2026-03-09T15:35:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:03 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T15:35:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:03 vm09 ceph-mon[77297]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T15:35:04.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:03 vm09 ceph-mon[77297]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:35:04.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:03 vm09 ceph-mon[77297]: Cluster is now healthy 2026-03-09T15:35:04.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:03 vm09 ceph-mon[77297]: Health check failed: Degraded data redundancy: 67/627 objects degraded (10.686%), 21 pgs degraded (PG_DEGRADED) 2026-03-09T15:35:04.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:03 vm09 ceph-mon[77297]: osd.0 [v2:192.168.123.105:6802/2256159024,v1:192.168.123.105:6803/2256159024] boot 2026-03-09T15:35:04.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:03 vm09 ceph-mon[77297]: osdmap e91: 8 total, 8 up, 8 in 2026-03-09T15:35:04.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:03 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 0}]: dispatch 2026-03-09T15:35:05.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:05 vm09 ceph-mon[77297]: osdmap e92: 8 total, 8 up, 8 in 2026-03-09T15:35:05.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:05 vm05 ceph-mon[86498]: osdmap e92: 8 total, 8 up, 8 in 2026-03-09T15:35:05.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:05 vm05 ceph-mon[88323]: osdmap e92: 8 total, 8 up, 8 in 2026-03-09T15:35:06.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:06 vm09 ceph-mon[77297]: pgmap v25: 161 pgs: 37 active+undersized, 21 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 67/627 objects degraded (10.686%) 2026-03-09T15:35:06.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:06 vm05 ceph-mon[86498]: pgmap v25: 161 pgs: 37 active+undersized, 21 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 67/627 objects degraded (10.686%) 2026-03-09T15:35:06.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:06 vm05 ceph-mon[88323]: pgmap v25: 161 pgs: 37 active+undersized, 21 active+undersized+degraded, 103 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 67/627 objects degraded (10.686%) 2026-03-09T15:35:07.062 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", 
domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:35:07.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:07 vm05 ceph-mon[86498]: pgmap v26: 161 pgs: 15 active+undersized, 11 active+undersized+degraded, 135 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 36/627 objects degraded (5.742%) 2026-03-09T15:35:07.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:07 vm05 ceph-mon[88323]: pgmap v26: 161 pgs: 15 active+undersized, 11 active+undersized+degraded, 135 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 36/627 objects degraded (5.742%) 2026-03-09T15:35:07.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:07 vm09 ceph-mon[77297]: pgmap v26: 161 pgs: 15 active+undersized, 11 active+undersized+degraded, 135 active+clean; 457 KiB data, 120 MiB used, 160 GiB / 160 GiB avail; 36/627 objects degraded (5.742%) 2026-03-09T15:35:08.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:08 vm05 ceph-mon[86498]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 36/627 objects degraded (5.742%), 11 pgs degraded) 2026-03-09T15:35:08.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:08 vm05 ceph-mon[86498]: Cluster is now healthy 2026-03-09T15:35:08.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:08 vm05 ceph-mon[88323]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 36/627 objects degraded (5.742%), 11 pgs degraded) 2026-03-09T15:35:08.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:08 vm05 ceph-mon[88323]: Cluster is now healthy 2026-03-09T15:35:09.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:08 vm09 ceph-mon[77297]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 36/627 objects degraded (5.742%), 11 pgs degraded) 2026-03-09T15:35:09.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:08 vm09 ceph-mon[77297]: Cluster is now healthy 2026-03-09T15:35:09.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:09 vm05 ceph-mon[86498]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 545 B/s rd, 0 op/s 2026-03-09T15:35:09.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:09 vm05 ceph-mon[88323]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 545 B/s rd, 0 op/s 2026-03-09T15:35:10.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:09 vm09 ceph-mon[77297]: pgmap v27: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 545 B/s rd, 0 op/s 2026-03-09T15:35:11.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:11 vm05 ceph-mon[86498]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T15:35:11.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:11 vm05 ceph-mon[88323]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T15:35:12.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:11 vm09 
ceph-mon[77297]: pgmap v28: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 639 B/s rd, 0 op/s 2026-03-09T15:35:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:12 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:12.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:35:12 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:35:12] "GET /metrics HTTP/1.1" 200 37873 "" "Prometheus/2.51.0" 2026-03-09T15:35:12.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:12 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:13.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:12 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:13.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:13 vm05 ceph-mon[86498]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T15:35:13.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:13 vm05 ceph-mon[88323]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T15:35:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:13.512Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:13.512Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:13.513Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:13.513Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:14.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 
15:35:13 vm09 ceph-mon[77297]: pgmap v29: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T15:35:14.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:14.148Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-09T15:35:14.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:14.149Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:35:15.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:15 vm05 ceph-mon[86498]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 975 B/s rd, 0 op/s 2026-03-09T15:35:15.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:15 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:15.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:15 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:15.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:15 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:35:15.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:15 vm05 ceph-mon[88323]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 975 B/s rd, 0 op/s 2026-03-09T15:35:15.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:15 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:15.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:15 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:15.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:15 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:35:16.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:15 vm09 ceph-mon[77297]: pgmap v30: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 975 B/s rd, 0 op/s 2026-03-09T15:35:16.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:15 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:16.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:15 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:16.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:15 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:35:17.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:16 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:35:17.912 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:17 vm05 ceph-mon[86498]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:35:17.913 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:17 vm05 ceph-mon[88323]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:35:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:17 vm09 ceph-mon[77297]: pgmap v31: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:35:18.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T15:35:18.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T15:35:18.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[86498]: Upgrade: osd.1 is safe to restart 2026-03-09T15:35:18.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[86498]: Upgrade: Updating osd.1 2026-03-09T15:35:18.737 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:18.737 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T15:35:18.737 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:18.737 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[86498]: Deploying daemon osd.1 on vm05 2026-03-09T15:35:18.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T15:35:18.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T15:35:18.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[88323]: Upgrade: osd.1 is safe to restart 2026-03-09T15:35:18.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[88323]: Upgrade: Updating osd.1 2026-03-09T15:35:18.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:18.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T15:35:18.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:18.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:18 vm05 ceph-mon[88323]: Deploying daemon osd.1 on vm05 2026-03-09T15:35:19.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:18 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T15:35:19.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:18 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["1"], "max": 16}]: dispatch 2026-03-09T15:35:19.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:18 vm09 ceph-mon[77297]: Upgrade: osd.1 is safe to restart 2026-03-09T15:35:19.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:18 vm09 ceph-mon[77297]: Upgrade: Updating osd.1 2026-03-09T15:35:19.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:18 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:19.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:18 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.1"}]: dispatch 2026-03-09T15:35:19.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:18 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:19.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:18 vm09 ceph-mon[77297]: Deploying daemon osd.1 on vm05 2026-03-09T15:35:19.236 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:18 vm05 systemd[1]: Stopping Ceph osd.1 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:35:19.236 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:18 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1[60125]: 2026-03-09T15:35:18.853+0000 7fa4dc7ad700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:35:19.236 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:18 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1[60125]: 2026-03-09T15:35:18.854+0000 7fa4dc7ad700 -1 osd.1 92 *** Got signal Terminated *** 2026-03-09T15:35:19.237 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:18 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1[60125]: 2026-03-09T15:35:18.854+0000 7fa4dc7ad700 -1 osd.1 92 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:35:19.933 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:19 vm05 ceph-mon[86498]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:35:19.933 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:19 vm05 ceph-mon[86498]: osd.1 marked itself down and dead 2026-03-09T15:35:19.933 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:19 vm05 ceph-mon[88323]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:35:19.933 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:19 vm05 ceph-mon[88323]: osd.1 marked itself down and dead 2026-03-09T15:35:19.933 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:19 vm05 podman[92334]: 2026-03-09 15:35:19.719361509 +0000 UTC m=+0.879893131 container died 2277528e9f90a60e2dd62472ba5d6a6117a7dba1a2e93a9b63cf6d797f873f3e (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1, io.buildah.version=1.19.8, vcs-type=git, build-date=2022-05-03T08:36:31.336870, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_CLEAN=True, RELEASE=HEAD, CEPH_POINT_RELEASE=-17.2.0, io.openshift.expose-services=, vendor=Red Hat, Inc., com.redhat.license_terms=https://centos.org/legal/licensing-policy/, name=centos-stream, 
url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, version=8, distribution-scope=public, ceph=True, io.openshift.tags=base centos centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, com.redhat.component=centos-stream-container, io.k8s.display-name=CentOS Stream 8, GIT_BRANCH=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, architecture=x86_64, release=754, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb) 2026-03-09T15:35:19.933 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:19 vm05 podman[92334]: 2026-03-09 15:35:19.758149459 +0000 UTC m=+0.918681071 container remove 2277528e9f90a60e2dd62472ba5d6a6117a7dba1a2e93a9b63cf6d797f873f3e (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1, com.redhat.component=centos-stream-container, version=8, GIT_REPO=https://github.com/ceph/ceph-container.git, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, maintainer=Guillaume Abrioux , ceph=True, io.k8s.display-name=CentOS Stream 8, io.buildah.version=1.19.8, io.openshift.tags=base centos centos-stream, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, RELEASE=HEAD, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, release=754, CEPH_POINT_RELEASE=-17.2.0, GIT_BRANCH=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.openshift.expose-services=, GIT_CLEAN=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., architecture=x86_64, vcs-type=git, vendor=Red Hat, Inc., build-date=2022-05-03T08:36:31.336870) 2026-03-09T15:35:19.933 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:19 vm05 bash[92334]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1 2026-03-09T15:35:19.933 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:19 vm05 podman[92400]: 2026-03-09 15:35:19.910876064 +0000 UTC m=+0.018312609 container create 1d046b6485e697bb9841289ee3428f737c7eb48848107e97c9c40ba7a5336898 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-deactivate, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0) 2026-03-09T15:35:20.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:19 vm09 ceph-mon[77297]: pgmap v32: 161 pgs: 161 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:35:20.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:19 vm09 ceph-mon[77297]: osd.1 marked itself down and dead 2026-03-09T15:35:20.237 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:19 vm05 podman[92400]: 2026-03-09 15:35:19.952926798 +0000 UTC m=+0.060363353 container init 1d046b6485e697bb9841289ee3428f737c7eb48848107e97c9c40ba7a5336898 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:35:20.237 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:19 vm05 podman[92400]: 2026-03-09 15:35:19.956560826 +0000 UTC m=+0.063997381 container start 1d046b6485e697bb9841289ee3428f737c7eb48848107e97c9c40ba7a5336898 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, 
OSD_FLAVOR=default, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T15:35:20.237 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:19 vm05 podman[92400]: 2026-03-09 15:35:19.957640797 +0000 UTC m=+0.065077352 container attach 1d046b6485e697bb9841289ee3428f737c7eb48848107e97c9c40ba7a5336898 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-deactivate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.license=GPLv2) 2026-03-09T15:35:20.237 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 podman[92400]: 2026-03-09 15:35:19.903949105 +0000 UTC m=+0.011385671 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:35:20.237 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 podman[92419]: 2026-03-09 15:35:20.121104407 +0000 UTC m=+0.011630438 container died 1d046b6485e697bb9841289ee3428f737c7eb48848107e97c9c40ba7a5336898 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-deactivate, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:35:20.237 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 podman[92419]: 2026-03-09 15:35:20.137287801 +0000 UTC m=+0.027813842 container remove 1d046b6485e697bb9841289ee3428f737c7eb48848107e97c9c40ba7a5336898 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-deactivate, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:35:20.237 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 systemd[1]: 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.1.service: Deactivated successfully. 2026-03-09T15:35:20.237 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 systemd[1]: Stopped Ceph osd.1 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:35:20.237 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.1.service: Consumed 15.250s CPU time. 2026-03-09T15:35:20.706 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 systemd[1]: Starting Ceph osd.1 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:35:20.706 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 podman[92505]: 2026-03-09 15:35:20.452080572 +0000 UTC m=+0.017913361 container create 0645cd7cf5c9362be3cf590c2ea65fb7a331f3479d02590c421bfe1ba9938e56 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223) 2026-03-09T15:35:20.706 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 podman[92505]: 2026-03-09 15:35:20.492520803 +0000 UTC m=+0.058353592 container init 0645cd7cf5c9362be3cf590c2ea65fb7a331f3479d02590c421bfe1ba9938e56 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, io.buildah.version=1.41.3) 2026-03-09T15:35:20.706 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 podman[92505]: 2026-03-09 15:35:20.496601407 +0000 UTC m=+0.062434196 container start 0645cd7cf5c9362be3cf590c2ea65fb7a331f3479d02590c421bfe1ba9938e56 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, 
org.label-schema.schema-version=1.0, io.buildah.version=1.41.3) 2026-03-09T15:35:20.706 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 podman[92505]: 2026-03-09 15:35:20.498055797 +0000 UTC m=+0.063888597 container attach 0645cd7cf5c9362be3cf590c2ea65fb7a331f3479d02590c421bfe1ba9938e56 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, OSD_FLAVOR=default, org.label-schema.vendor=CentOS) 2026-03-09T15:35:20.706 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 podman[92505]: 2026-03-09 15:35:20.44539765 +0000 UTC m=+0.011230449 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:35:20.706 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate[92516]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:20.706 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 bash[92505]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:20.706 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate[92516]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:20.706 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:20 vm05 bash[92505]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:20.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:20 vm05 ceph-mon[86498]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:35:20.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:20 vm05 ceph-mon[86498]: osdmap e93: 8 total, 7 up, 8 in 2026-03-09T15:35:20.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:20 vm05 ceph-mon[88323]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:35:20.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:20 vm05 ceph-mon[88323]: osdmap e93: 8 total, 7 up, 8 in 2026-03-09T15:35:21.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:20 vm09 ceph-mon[77297]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:35:21.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:20 vm09 ceph-mon[77297]: osdmap e93: 8 total, 7 up, 8 in 2026-03-09T15:35:21.455 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate[92516]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:35:21.456 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 bash[92505]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:35:21.456 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate[92516]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:21.456 
INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 bash[92505]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:21.456 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate[92516]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:21.456 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 bash[92505]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:21.456 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate[92516]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-09T15:35:21.456 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 bash[92505]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-09T15:35:21.456 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate[92516]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f3338b47-1981-486f-9ca1-20afa9a1b7ac/osd-block-a281303c-8662-4f54-8846-33be08391553 --path /var/lib/ceph/osd/ceph-1 --no-mon-config 2026-03-09T15:35:21.456 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 bash[92505]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f3338b47-1981-486f-9ca1-20afa9a1b7ac/osd-block-a281303c-8662-4f54-8846-33be08391553 --path /var/lib/ceph/osd/ceph-1 --no-mon-config 2026-03-09T15:35:21.456 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate[92516]: Running command: /usr/bin/ln -snf /dev/ceph-f3338b47-1981-486f-9ca1-20afa9a1b7ac/osd-block-a281303c-8662-4f54-8846-33be08391553 /var/lib/ceph/osd/ceph-1/block 2026-03-09T15:35:21.456 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 bash[92505]: Running command: /usr/bin/ln -snf /dev/ceph-f3338b47-1981-486f-9ca1-20afa9a1b7ac/osd-block-a281303c-8662-4f54-8846-33be08391553 /var/lib/ceph/osd/ceph-1/block 2026-03-09T15:35:21.736 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate[92516]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block 2026-03-09T15:35:21.736 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 bash[92505]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-1/block 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate[92516]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 bash[92505]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate[92516]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 bash[92505]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate[92516]: --> ceph-volume lvm activate successful for osd ID: 1 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 
vm05 bash[92505]: --> ceph-volume lvm activate successful for osd ID: 1 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 podman[92505]: 2026-03-09 15:35:21.497020394 +0000 UTC m=+1.062853183 container died 0645cd7cf5c9362be3cf590c2ea65fb7a331f3479d02590c421bfe1ba9938e56 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_REF=squid, ceph=True, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 podman[92505]: 2026-03-09 15:35:21.525062351 +0000 UTC m=+1.090895140 container remove 0645cd7cf5c9362be3cf590c2ea65fb7a331f3479d02590c421bfe1ba9938e56 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1-activate, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 podman[92770]: 2026-03-09 15:35:21.62640892 +0000 UTC m=+0.019085684 container create 379185d73d4e0c4fd2caeb66da15342d8f6393d7d7b12278dd35fc4bf2c227f3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2) 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 podman[92770]: 2026-03-09 15:35:21.666433473 +0000 UTC m=+0.059110237 container init 379185d73d4e0c4fd2caeb66da15342d8f6393d7d7b12278dd35fc4bf2c227f3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, 
org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, ceph=True) 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 podman[92770]: 2026-03-09 15:35:21.669368143 +0000 UTC m=+0.062044907 container start 379185d73d4e0c4fd2caeb66da15342d8f6393d7d7b12278dd35fc4bf2c227f3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 bash[92770]: 379185d73d4e0c4fd2caeb66da15342d8f6393d7d7b12278dd35fc4bf2c227f3 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 podman[92770]: 2026-03-09 15:35:21.61781333 +0000 UTC m=+0.010490104 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 systemd[1]: Started Ceph osd.1 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
2026-03-09T15:35:21.737 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:21 vm05 ceph-osd[92785]: -- 192.168.123.105:0/2320212019 <== mon.0 v2:192.168.123.105:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x5620ac7e6960 con 0x5620ad5d0400 2026-03-09T15:35:22.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:21 vm05 ceph-mon[86498]: pgmap v34: 161 pgs: 20 stale+active+clean, 141 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:35:22.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:21 vm05 ceph-mon[86498]: osdmap e94: 8 total, 7 up, 8 in 2026-03-09T15:35:22.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:21 vm05 ceph-mon[88323]: pgmap v34: 161 pgs: 20 stale+active+clean, 141 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:35:22.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:21 vm05 ceph-mon[88323]: osdmap e94: 8 total, 7 up, 8 in 2026-03-09T15:35:22.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:21 vm09 ceph-mon[77297]: pgmap v34: 161 pgs: 20 stale+active+clean, 141 active+clean; 457 KiB data, 121 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:35:22.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:21 vm09 ceph-mon[77297]: osdmap e94: 8 total, 7 up, 8 in 2026-03-09T15:35:22.582 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:22 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1[92781]: 2026-03-09T15:35:22.249+0000 7f88283b2740 -1 Falling back to public interface 2026-03-09T15:35:22.582 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:35:22.890 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:35:22 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:35:22] "GET /metrics HTTP/1.1" 200 37897 "" "Prometheus/2.51.0" 2026-03-09T15:35:23.151 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:23 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:23.151 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:23 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:23.151 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:23 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:23.151 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:23 vm05 ceph-mon[88323]: from='osd.1 [v2:192.168.123.105:6810/3433416294,v1:192.168.123.105:6811/3433416294]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T15:35:23.151 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:23 vm05 ceph-mon[88323]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T15:35:23.151 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:22 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1[92781]: 2026-03-09T15:35:22.895+0000 7f88283b2740 -1 osd.1 0 read_superblock omap replica is missing. 
2026-03-09T15:35:23.151 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:22 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1[92781]: 2026-03-09T15:35:22.941+0000 7f88283b2740 -1 osd.1 92 log_to_monitors true 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (88s) 22s ago 6m 24.8M - 0.25.0 c8568f914cd2 93224b6bb99a 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (77s) 35s ago 6m 42.6M - dad864ee21e9 6a58314a043e 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 running (65s) 22s ago 6m 46.1M - 3.5 e1d6a67b021e 3a5f40e66729 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443,9283,8765 running (62s) 35s ago 8m 487M - 19.2.3-678-ge911bdeb 654f31e6858e dd2d7e10f3aa 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:8443,9283,8765 running (75s) 22s ago 8m 548M - 19.2.3-678-ge911bdeb 654f31e6858e db0211ba824d 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (57s) 22s ago 8m 46.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3fa7c78f8952 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (42s) 35s ago 8m 21.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 60013cd0d65b 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (47s) 22s ago 8m 38.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c4256ae4b3f9 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (92s) 22s ago 6m 9483k - 1.7.0 72c9c2088986 e730a028339f 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (84s) 35s ago 6m 9202k - 1.7.0 72c9c2088986 a360ac0679f4 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (24s) 22s ago 7m 15.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 343a65bb3f01 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 starting - - - 4096M 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (7m) 22s ago 7m 49.8M 4096M 17.2.0 e1d6a67b021e 21b53f2cd34c 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (7m) 22s ago 7m 55.1M 4096M 17.2.0 e1d6a67b021e b4398847e195 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (7m) 35s ago 7m 52.4M 4096M 17.2.0 e1d6a67b021e 00685022776e 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (7m) 35s ago 7m 50.4M 4096M 17.2.0 e1d6a67b021e fbdec571623e 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (7m) 35s ago 7m 51.1M 4096M 17.2.0 e1d6a67b021e ad01856a3458 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (6m) 35s ago 6m 51.4M 4096M 17.2.0 e1d6a67b021e 3cc4727e5f07 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (64s) 35s ago 6m 42.6M - 2.51.0 1d3b7f56885b 737f11649a72 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (6m) 22s ago 6m 92.7M - 17.2.0 e1d6a67b021e 1d93c894d675 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (6m) 35s ago 6m 
91.7M - 17.2.0 e1d6a67b021e 5c4755297ad0 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (6m) 22s ago 6m 93.9M - 17.2.0 e1d6a67b021e fe5812c6c74c 2026-03-09T15:35:23.152 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (6m) 35s ago 6m 90.7M - 17.2.0 e1d6a67b021e 7a967179b651 2026-03-09T15:35:23.154 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:23 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:23.154 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:23 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:23.154 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:23 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:23.154 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:23 vm05 ceph-mon[86498]: from='osd.1 [v2:192.168.123.105:6810/3433416294,v1:192.168.123.105:6811/3433416294]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T15:35:23.154 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:23 vm05 ceph-mon[86498]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T15:35:23.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:23 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:23.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:23 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:23.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:23 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:23.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:23 vm09 ceph-mon[77297]: from='osd.1 [v2:192.168.123.105:6810/3433416294,v1:192.168.123.105:6811/3433416294]' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T15:35:23.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:23 vm09 ceph-mon[77297]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]: dispatch 2026-03-09T15:35:23.579 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:35:23.579 INFO:teuthology.orchestra.run.vm05.stdout: "mon": { 2026-03-09T15:35:23.579 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T15:35:23.579 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:35:23.579 INFO:teuthology.orchestra.run.vm05.stdout: "mgr": { 2026-03-09T15:35:23.579 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T15:35:23.579 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:35:23.579 INFO:teuthology.orchestra.run.vm05.stdout: "osd": { 2026-03-09T15:35:23.579 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 6, 2026-03-09T15:35:23.579 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 1 
2026-03-09T15:35:23.579 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:35:23.579 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": { 2026-03-09T15:35:23.579 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-09T15:35:23.580 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:35:23.580 INFO:teuthology.orchestra.run.vm05.stdout: "overall": { 2026-03-09T15:35:23.580 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 10, 2026-03-09T15:35:23.580 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 6 2026-03-09T15:35:23.580 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:35:23.580 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:35:23.945 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:23.513Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:23.945 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:23.514Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:23.945 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:23.515Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:23.945 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:23.515Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:23.946 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:35:23.946 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T15:35:23.946 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true, 2026-03-09T15:35:23.946 INFO:teuthology.orchestra.run.vm05.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-09T15:35:23.946 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [ 2026-03-09T15:35:23.946 INFO:teuthology.orchestra.run.vm05.stdout: "mgr", 2026-03-09T15:35:23.946 
INFO:teuthology.orchestra.run.vm05.stdout: "mon" 2026-03-09T15:35:23.946 INFO:teuthology.orchestra.run.vm05.stdout: ], 2026-03-09T15:35:23.946 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "6/23 daemons upgraded", 2026-03-09T15:35:23.946 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Currently upgrading osd daemons", 2026-03-09T15:35:23.946 INFO:teuthology.orchestra.run.vm05.stdout: "is_paused": false 2026-03-09T15:35:23.946 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:35:24.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[86498]: pgmap v36: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-09T15:35:24.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[86498]: from='client.44131 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:24.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[86498]: from='client.44137 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:24.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[86498]: Health check failed: Degraded data redundancy: 85/627 objects degraded (13.557%), 22 pgs degraded (PG_DEGRADED) 2026-03-09T15:35:24.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[86498]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T15:35:24.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[86498]: osdmap e95: 8 total, 7 up, 8 in 2026-03-09T15:35:24.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[86498]: from='osd.1 [v2:192.168.123.105:6810/3433416294,v1:192.168.123.105:6811/3433416294]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:24.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[86498]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:24.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[86498]: from='client.? 
192.168.123.105:0/271323391' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:24.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[88323]: pgmap v36: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-09T15:35:24.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[88323]: from='client.44131 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:24.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[88323]: from='client.44137 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:24.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[88323]: Health check failed: Degraded data redundancy: 85/627 objects degraded (13.557%), 22 pgs degraded (PG_DEGRADED) 2026-03-09T15:35:24.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[88323]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T15:35:24.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[88323]: osdmap e95: 8 total, 7 up, 8 in 2026-03-09T15:35:24.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[88323]: from='osd.1 [v2:192.168.123.105:6810/3433416294,v1:192.168.123.105:6811/3433416294]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:24.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[88323]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:24.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:24 vm05 ceph-mon[88323]: from='client.? 
192.168.123.105:0/271323391' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:24.273 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_WARN 1 osds down; Degraded data redundancy: 85/627 objects degraded (13.557%), 22 pgs degraded 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout:[WRN] OSD_DOWN: 1 osds down 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: osd.1 (root=default,host=vm05) is down 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout:[WRN] PG_DEGRADED: Degraded data redundancy: 85/627 objects degraded (13.557%), 22 pgs degraded 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 2.2 is active+undersized+degraded, acting [5,6] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 2.4 is active+undersized+degraded, acting [0,7] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 2.9 is active+undersized+degraded, acting [7,3] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 2.a is active+undersized+degraded, acting [3,7] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 2.11 is active+undersized+degraded, acting [6,4] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.0 is active+undersized+degraded, acting [2,6] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.4 is active+undersized+degraded, acting [2,5] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.6 is active+undersized+degraded, acting [0,4] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.8 is active+undersized+degraded, acting [3,7] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.a is active+undersized+degraded, acting [6,4] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.e is active+undersized+degraded, acting [7,4] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.16 is active+undersized+degraded, acting [5,7] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.18 is active+undersized+degraded, acting [3,0] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.19 is active+undersized+degraded, acting [3,4] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.1a is active+undersized+degraded, acting [4,2] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.1c is active+undersized+degraded, acting [5,4] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 4.9 is active+undersized+degraded, acting [4,3] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 4.d is active+undersized+degraded, acting [4,2] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 4.e is active+undersized+degraded, acting [4,6] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 4.14 is active+undersized+degraded, acting [3,7] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 4.1f is active+undersized+degraded, acting [6,5] 2026-03-09T15:35:24.274 INFO:teuthology.orchestra.run.vm05.stdout: pg 6.1a is active+undersized+degraded, acting [4,5] 2026-03-09T15:35:24.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd 
msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.1\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.1\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:35:24.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:24 vm09 ceph-mon[77297]: pgmap v36: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 122 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-09T15:35:24.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:24 vm09 ceph-mon[77297]: from='client.44131 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:24.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:24 vm09 ceph-mon[77297]: from='client.44137 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:24.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:24 vm09 ceph-mon[77297]: Health check failed: Degraded data redundancy: 85/627 objects degraded (13.557%), 22 pgs degraded (PG_DEGRADED) 2026-03-09T15:35:24.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:24 vm09 ceph-mon[77297]: from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["1"]}]': finished 2026-03-09T15:35:24.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:24 vm09 ceph-mon[77297]: osdmap e95: 8 total, 7 up, 8 in 2026-03-09T15:35:24.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:24 vm09 ceph-mon[77297]: from='osd.1 [v2:192.168.123.105:6810/3433416294,v1:192.168.123.105:6811/3433416294]' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:24.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:24 vm09 ceph-mon[77297]: from='osd.1 ' entity='osd.1' cmd=[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm05", 
"root=default"]}]: dispatch 2026-03-09T15:35:24.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:24 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/271323391' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:24.736 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:35:24 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1[92781]: 2026-03-09T15:35:24.449+0000 7f881f95c640 -1 osd.1 92 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:35:25.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:25 vm09 ceph-mon[77297]: from='client.44146 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:25.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:25 vm09 ceph-mon[77297]: from='client.44141 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:25.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:25 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/2480648574' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:35:25.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:25 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:25.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:25 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:25.442 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:25 vm05 ceph-mon[88323]: from='client.44146 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:25.442 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:25 vm05 ceph-mon[88323]: from='client.44141 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:25.442 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:25 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/2480648574' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:35:25.442 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:25 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:25.442 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:25 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:25.442 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:25 vm05 ceph-mon[86498]: from='client.44146 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:25.442 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:25 vm05 ceph-mon[86498]: from='client.44141 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:25.442 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:25 vm05 ceph-mon[86498]: from='client.? 
192.168.123.105:0/2480648574' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:35:25.442 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:25 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:25.442 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:25 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:26.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:26 vm09 ceph-mon[77297]: OSD bench result of 8237.279493 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.1. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T15:35:26.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:26 vm09 ceph-mon[77297]: pgmap v38: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 140 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-09T15:35:26.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:26 vm09 ceph-mon[77297]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:35:26.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:26 vm09 ceph-mon[77297]: osd.1 [v2:192.168.123.105:6810/3433416294,v1:192.168.123.105:6811/3433416294] boot 2026-03-09T15:35:26.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:26 vm09 ceph-mon[77297]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T15:35:26.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:26 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:35:26.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:26 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:26.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:26 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:26.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[86498]: OSD bench result of 8237.279493 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.1. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T15:35:26.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[86498]: pgmap v38: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 140 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-09T15:35:26.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[86498]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:35:26.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[86498]: osd.1 [v2:192.168.123.105:6810/3433416294,v1:192.168.123.105:6811/3433416294] boot 2026-03-09T15:35:26.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[86498]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T15:35:26.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:35:26.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:26.363 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:26.363 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[88323]: OSD bench result of 8237.279493 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.1. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T15:35:26.364 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[88323]: pgmap v38: 161 pgs: 41 active+undersized, 22 active+undersized+degraded, 98 active+clean; 457 KiB data, 140 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-09T15:35:26.364 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[88323]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:35:26.364 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[88323]: osd.1 [v2:192.168.123.105:6810/3433416294,v1:192.168.123.105:6811/3433416294] boot 2026-03-09T15:35:26.364 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[88323]: osdmap e96: 8 total, 8 up, 8 in 2026-03-09T15:35:26.364 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 1}]: dispatch 2026-03-09T15:35:26.364 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:26.364 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:26 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:26.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:35:26 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:35:26.623+0000 7fef4b3b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (8 PGs are or would become offline) 2026-03-09T15:35:27.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:26 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:26.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning 
index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:35:27.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: osdmap e97: 8 total, 8 up, 8 in 2026-03-09T15:35:27.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: pgmap v41: 161 pgs: 6 peering, 26 active+undersized, 14 active+undersized+degraded, 115 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 50/627 objects degraded (7.974%) 2026-03-09T15:35:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:35:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:35:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:27.313 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T15:35:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T15:35:27.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:27 vm09 ceph-mon[77297]: Upgrade: unsafe to stop osd(s) at this time (8 PGs are or would become offline) 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: osdmap e97: 8 total, 8 up, 8 in 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: pgmap v41: 161 pgs: 6 peering, 26 active+undersized, 14 active+undersized+degraded, 115 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 50/627 objects degraded (7.974%) 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[86498]: Upgrade: unsafe to stop osd(s) at this time (8 PGs are or would become offline) 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: osdmap e97: 8 total, 8 up, 8 in 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: pgmap v41: 161 pgs: 6 peering, 26 active+undersized, 14 active+undersized+degraded, 115 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 50/627 objects degraded (7.974%) 2026-03-09T15:35:27.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:27.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:27.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:27.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:35:27.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:27.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:35:27.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:27.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:27.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:27.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T15:35:27.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T15:35:27.487 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:27 vm05 ceph-mon[88323]: Upgrade: unsafe to stop osd(s) at this time (8 PGs are or would become offline) 2026-03-09T15:35:28.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:28 vm05 ceph-mon[86498]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 50/627 objects degraded (7.974%), 14 pgs degraded) 2026-03-09T15:35:28.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:28 vm05 ceph-mon[86498]: Cluster is now healthy 2026-03-09T15:35:28.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:28 vm05 ceph-mon[88323]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 50/627 objects degraded (7.974%), 14 pgs degraded) 2026-03-09T15:35:28.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:28 vm05 ceph-mon[88323]: Cluster is now healthy 2026-03-09T15:35:29.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:28 vm09 ceph-mon[77297]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 50/627 objects degraded (7.974%), 14 pgs degraded) 2026-03-09T15:35:29.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:28 vm09 ceph-mon[77297]: Cluster is now healthy 2026-03-09T15:35:29.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:29 vm05 ceph-mon[86498]: pgmap v42: 161 pgs: 6 peering, 155 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-09T15:35:29.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:29 vm05 ceph-mon[88323]: pgmap v42: 161 pgs: 6 peering, 155 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-09T15:35:30.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:29 vm09 ceph-mon[77297]: pgmap v42: 161 pgs: 6 peering, 155 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 852 B/s rd, 0 op/s 2026-03-09T15:35:30.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:35:30.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:30 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:35:31.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:30 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:35:31.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:31 vm05 ceph-mon[86498]: pgmap v43: 161 pgs: 6 peering, 155 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 689 B/s rd, 0 op/s 2026-03-09T15:35:31.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:31 vm05 ceph-mon[88323]: pgmap v43: 161 pgs: 6 peering, 155 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 689 B/s rd, 0 op/s 2026-03-09T15:35:32.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:31 vm09 ceph-mon[77297]: pgmap v43: 161 pgs: 6 peering, 155 active+clean; 457 KiB data, 141 MiB used, 160 GiB / 160 GiB avail; 689 B/s rd, 0 op/s 2026-03-09T15:35:32.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:32 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:32.986 
INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:35:32 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:35:32] "GET /metrics HTTP/1.1" 200 37903 "" "Prometheus/2.51.0" 2026-03-09T15:35:32.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:32 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:33.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:32 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:33.788 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:33 vm05 ceph-mon[86498]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:35:33.788 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:33.514Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:33.788 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:33.514Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:33.788 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:33.515Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:33.788 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:33.515Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:33.788 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:33 vm05 ceph-mon[88323]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:35:34.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:33 vm09 ceph-mon[77297]: pgmap v44: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:35:34.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:34.148Z caller=group.go:483 level=warn name=CephOSDFlapping 
index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.2\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:35:35.983 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:35 vm09 ceph-mon[77297]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T15:35:35.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:35 vm05 ceph-mon[86498]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T15:35:35.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:35 vm05 ceph-mon[88323]: pgmap v45: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T15:35:37.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", 
machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:35:37.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:37 vm05 ceph-mon[86498]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 984 B/s rd, 0 op/s 2026-03-09T15:35:37.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:37 vm05 ceph-mon[88323]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 984 B/s rd, 0 op/s 2026-03-09T15:35:38.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:37 vm09 ceph-mon[77297]: pgmap v46: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 984 B/s rd, 0 op/s 2026-03-09T15:35:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:39 vm05 ceph-mon[86498]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:35:39.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:39 vm05 ceph-mon[88323]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:35:40.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:39 vm09 ceph-mon[77297]: pgmap v47: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:35:41.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:41 vm09 ceph-mon[77297]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:35:41.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:41 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:41.825 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:41 vm05 ceph-mon[88323]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:35:41.825 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:41 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:41.826 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:41 vm05 ceph-mon[86498]: pgmap v48: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:35:41.826 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:41 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:42.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:42.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T15:35:42.717 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[86498]: Upgrade: osd.2 is safe to restart 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[86498]: Upgrade: Updating osd.2 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[86498]: Deploying daemon osd.2 on vm05 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:35:42 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:35:42] "GET /metrics HTTP/1.1" 200 37903 "" "Prometheus/2.51.0" 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[88323]: Upgrade: osd.2 is safe to restart 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[88323]: Upgrade: Updating osd.2 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:42.718 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:42 vm05 ceph-mon[88323]: Deploying daemon osd.2 on vm05 2026-03-09T15:35:42.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:42 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:42.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:42 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T15:35:42.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:42 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["2"], "max": 16}]: dispatch 2026-03-09T15:35:42.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:42 vm09 ceph-mon[77297]: Upgrade: osd.2 is safe to restart 2026-03-09T15:35:42.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:42 vm09 ceph-mon[77297]: Upgrade: Updating osd.2 2026-03-09T15:35:42.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:42 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:42.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:42 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.2"}]: dispatch 2026-03-09T15:35:42.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:42 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:42.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:42 vm09 ceph-mon[77297]: Deploying daemon osd.2 on vm05 2026-03-09T15:35:42.986 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:42 vm05 systemd[1]: Stopping Ceph osd.2 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:35:42.993 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:42 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2[62868]: 2026-03-09T15:35:42.834+0000 7f3348602700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:35:42.993 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:42 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2[62868]: 2026-03-09T15:35:42.834+0000 7f3348602700 -1 osd.2 97 *** Got signal Terminated *** 2026-03-09T15:35:42.993 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:42 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2[62868]: 2026-03-09T15:35:42.834+0000 7f3348602700 -1 osd.2 97 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:35:43.766 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:43 vm05 ceph-mon[86498]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:35:43.766 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:43 vm05 ceph-mon[86498]: osd.2 marked itself down and dead 2026-03-09T15:35:43.767 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:43 vm05 ceph-mon[88323]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:35:43.767 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:43 vm05 ceph-mon[88323]: osd.2 marked itself down and dead 2026-03-09T15:35:43.767 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:43 vm05 podman[94442]: 2026-03-09 15:35:43.587951404 +0000 UTC m=+0.770525228 container died 21b53f2cd34c7f7f0f0ac164bba51ff139e443f38a7dca802fbacfb946631571 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2, io.buildah.version=1.19.8, vcs-type=git, CEPH_POINT_RELEASE=-17.2.0, io.openshift.expose-services=, release=754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, name=centos-stream, com.redhat.component=centos-stream-container, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, build-date=2022-05-03T08:36:31.336870, ceph=True, vendor=Red Hat, Inc., GIT_BRANCH=HEAD, distribution-scope=public, version=8, io.openshift.tags=base centos centos-stream, RELEASE=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., maintainer=Guillaume Abrioux , GIT_CLEAN=True, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_REPO=https://github.com/ceph/ceph-container.git, architecture=x86_64, io.k8s.display-name=CentOS Stream 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb) 2026-03-09T15:35:43.767 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:43 vm05 podman[94442]: 2026-03-09 15:35:43.611525243 +0000 UTC m=+0.794099067 container remove 21b53f2cd34c7f7f0f0ac164bba51ff139e443f38a7dca802fbacfb946631571 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vendor=Red Hat, Inc., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, maintainer=Guillaume Abrioux , description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.buildah.version=1.19.8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, vcs-type=git, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., release=754, distribution-scope=public, version=8, io.openshift.expose-services=, CEPH_POINT_RELEASE=-17.2.0, GIT_CLEAN=True, RELEASE=HEAD, name=centos-stream, architecture=x86_64) 2026-03-09T15:35:43.767 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:43 vm05 bash[94442]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2 2026-03-09T15:35:43.767 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:43 vm05 podman[94509]: 2026-03-09 15:35:43.765242821 +0000 UTC m=+0.020236887 container create 4fb479b97355a72c8a4de4d76c09ec1ccda035afc93fd48971c036c9f6768532 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-deactivate, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:35:43.768 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:43.515Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:43.768 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:43.515Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:43.768 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:43.516Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:43.768 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:43.516Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:43.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:43 vm09 ceph-mon[77297]: pgmap v49: 161 pgs: 161 active+clean; 457 KiB 
data, 142 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:35:43.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:43 vm09 ceph-mon[77297]: osd.2 marked itself down and dead 2026-03-09T15:35:44.023 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:43 vm05 podman[94509]: 2026-03-09 15:35:43.813906997 +0000 UTC m=+0.068901073 container init 4fb479b97355a72c8a4de4d76c09ec1ccda035afc93fd48971c036c9f6768532 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_REF=squid, ceph=True, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS) 2026-03-09T15:35:44.023 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:43 vm05 podman[94509]: 2026-03-09 15:35:43.817426561 +0000 UTC m=+0.072420627 container start 4fb479b97355a72c8a4de4d76c09ec1ccda035afc93fd48971c036c9f6768532 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-deactivate, org.label-schema.license=GPLv2, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T15:35:44.023 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:43 vm05 podman[94509]: 2026-03-09 15:35:43.8182597 +0000 UTC m=+0.073253766 container attach 4fb479b97355a72c8a4de4d76c09ec1ccda035afc93fd48971c036c9f6768532 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-deactivate, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3) 2026-03-09T15:35:44.023 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:43 vm05 podman[94509]: 2026-03-09 15:35:43.755948524 +0000 UTC m=+0.010942601 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:35:44.023 
INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:43 vm05 conmon[94520]: conmon 4fb479b97355a72c8a4d : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-4fb479b97355a72c8a4de4d76c09ec1ccda035afc93fd48971c036c9f6768532.scope/container/memory.events 2026-03-09T15:35:44.023 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:43 vm05 podman[94509]: 2026-03-09 15:35:43.962749225 +0000 UTC m=+0.217743291 container died 4fb479b97355a72c8a4de4d76c09ec1ccda035afc93fd48971c036c9f6768532 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-deactivate, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, OSD_FLAVOR=default) 2026-03-09T15:35:44.023 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:43 vm05 podman[94509]: 2026-03-09 15:35:43.994971493 +0000 UTC m=+0.249965549 container remove 4fb479b97355a72c8a4de4d76c09ec1ccda035afc93fd48971c036c9f6768532 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-deactivate, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_REF=squid) 2026-03-09T15:35:44.023 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:44 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.2.service: Deactivated successfully. 2026-03-09T15:35:44.023 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:44 vm05 systemd[1]: Stopped Ceph osd.2 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:35:44.023 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:44 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.2.service: Consumed 3.143s CPU time. 2026-03-09T15:35:44.317 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:44 vm05 systemd[1]: Starting Ceph osd.2 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
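Editor's note: at this point the upgrade has reached osd.2. The mgr repeatedly asked the mons "osd ok-to-stop" until it came back safe, marked the daemon for redeploy on the new image, and systemd stopped the old quincy (17.2.0) container; a short-lived osd-2-deactivate container from the squid image then ran before the unit was restarted. Below is a minimal sketch, not part of the test, of how the same safety gate and unit state could be checked by hand on the host. The fsid is the one reported in this run; the jq filter over cephadm ls output assumes the usual field names and that jq is installed.

# Sketch only: manual equivalent of the per-OSD gate cephadm applies during 'ceph orch upgrade'.
fsid=452f6a00-1bcc-11f1-a1ee-7f1a2af01dea   # fsid of this cluster, from the log above
osd=2
# Ask the mons whether stopping this OSD would take PGs offline
# (the same "osd ok-to-stop" command the mgr dispatches in the log).
ceph osd ok-to-stop "$osd" || echo "not safe to stop osd.$osd yet"
# Inspect the per-daemon systemd unit that cephadm manages on the host.
systemctl status "ceph-$fsid@osd.$osd.service" --no-pager
# List what cephadm knows about this daemon (typically needs root).
cephadm ls | jq '.[] | select(.name=="osd.'$osd'") | {name, container_image_name, version}'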
2026-03-09T15:35:44.538 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:44 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:44.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.2\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:35:44.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:44 vm05 ceph-mon[86498]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:35:44.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:44 vm05 ceph-mon[86498]: osdmap e98: 8 total, 7 up, 8 in 2026-03-09T15:35:44.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:44 vm05 ceph-mon[88323]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:35:44.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:44 vm05 ceph-mon[88323]: osdmap e98: 8 total, 7 up, 8 in 2026-03-09T15:35:44.738 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:44 vm05 podman[94617]: 2026-03-09 15:35:44.326805061 +0000 UTC m=+0.022539525 container create dd64f4cad195323ca44b8bfc3f9f46ff07d0d6f3e643e5f3b0b0a559a7cfd32c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.opencontainers.image.authors=Ceph 
Release Team , org.label-schema.build-date=20260223) 2026-03-09T15:35:44.738 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:44 vm05 podman[94617]: 2026-03-09 15:35:44.369230808 +0000 UTC m=+0.064965281 container init dd64f4cad195323ca44b8bfc3f9f46ff07d0d6f3e643e5f3b0b0a559a7cfd32c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:35:44.738 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:44 vm05 podman[94617]: 2026-03-09 15:35:44.3734711 +0000 UTC m=+0.069205573 container start dd64f4cad195323ca44b8bfc3f9f46ff07d0d6f3e643e5f3b0b0a559a7cfd32c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.license=GPLv2) 2026-03-09T15:35:44.738 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:44 vm05 podman[94617]: 2026-03-09 15:35:44.374426518 +0000 UTC m=+0.070160981 container attach dd64f4cad195323ca44b8bfc3f9f46ff07d0d6f3e643e5f3b0b0a559a7cfd32c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:35:44.738 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:44 vm05 podman[94617]: 2026-03-09 15:35:44.318593051 +0000 UTC m=+0.014327524 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:35:44.738 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate[94632]: Running command: /usr/bin/ceph-authtool 
--gen-print-key 2026-03-09T15:35:44.738 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:44 vm05 bash[94617]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:44.738 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate[94632]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:44.738 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:44 vm05 bash[94617]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:44.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:44 vm09 ceph-mon[77297]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:35:44.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:44 vm09 ceph-mon[77297]: osdmap e98: 8 total, 7 up, 8 in 2026-03-09T15:35:45.456 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate[94632]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:35:45.457 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 bash[94617]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:35:45.457 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 bash[94617]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:45.457 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate[94632]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:45.457 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate[94632]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:45.457 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 bash[94617]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:35:45.457 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate[94632]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T15:35:45.457 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 bash[94617]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T15:35:45.457 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate[94632]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-4d55ea78-49b8-497f-a2da-524814ce9504/osd-block-3556c187-377e-47cc-8f72-be4edaa111a4 --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-09T15:35:45.457 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 bash[94617]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-4d55ea78-49b8-497f-a2da-524814ce9504/osd-block-3556c187-377e-47cc-8f72-be4edaa111a4 --path /var/lib/ceph/osd/ceph-2 --no-mon-config 2026-03-09T15:35:45.710 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:45 vm05 ceph-mon[86498]: pgmap v51: 161 pgs: 13 stale+active+clean, 148 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:35:45.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:45 vm05 ceph-mon[86498]: osdmap e99: 8 total, 7 up, 8 in 2026-03-09T15:35:45.711 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:45 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:45.711 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:45 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:35:45.711 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:45 vm05 ceph-mon[88323]: pgmap v51: 161 pgs: 13 stale+active+clean, 148 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:35:45.711 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:45 vm05 ceph-mon[88323]: osdmap e99: 8 total, 7 up, 8 in 2026-03-09T15:35:45.711 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:45 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:45.711 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:45 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:35:45.711 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate[94632]: Running command: /usr/bin/ln -snf /dev/ceph-4d55ea78-49b8-497f-a2da-524814ce9504/osd-block-3556c187-377e-47cc-8f72-be4edaa111a4 /var/lib/ceph/osd/ceph-2/block 2026-03-09T15:35:45.711 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 bash[94617]: Running command: /usr/bin/ln -snf /dev/ceph-4d55ea78-49b8-497f-a2da-524814ce9504/osd-block-3556c187-377e-47cc-8f72-be4edaa111a4 /var/lib/ceph/osd/ceph-2/block 2026-03-09T15:35:45.711 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate[94632]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-09T15:35:45.711 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 bash[94617]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-2/block 2026-03-09T15:35:45.711 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate[94632]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T15:35:45.711 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 bash[94617]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T15:35:45.711 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate[94632]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T15:35:45.711 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 bash[94617]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2 2026-03-09T15:35:45.711 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate[94632]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-09T15:35:45.711 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 bash[94617]: --> ceph-volume lvm activate successful for osd ID: 2 2026-03-09T15:35:45.711 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 conmon[94632]: conmon dd64f4cad195323ca44b : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-dd64f4cad195323ca44b8bfc3f9f46ff07d0d6f3e643e5f3b0b0a559a7cfd32c.scope/container/memory.events 2026-03-09T15:35:45.711 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 podman[94617]: 2026-03-09 15:35:45.488544941 +0000 UTC m=+1.184279424 container died dd64f4cad195323ca44b8bfc3f9f46ff07d0d6f3e643e5f3b0b0a559a7cfd32c 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate, org.label-schema.build-date=20260223, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T15:35:45.711 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 podman[94617]: 2026-03-09 15:35:45.58827718 +0000 UTC m=+1.284011653 container remove dd64f4cad195323ca44b8bfc3f9f46ff07d0d6f3e643e5f3b0b0a559a7cfd32c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-activate, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2) 2026-03-09T15:35:45.987 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 podman[94885]: 2026-03-09 15:35:45.709484161 +0000 UTC m=+0.019086446 container create 101cc91253b59d1d8826ffc42e5dc712b06d246879672f31c1c93053e95905b0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2, ceph=True, CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T15:35:45.987 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 podman[94885]: 2026-03-09 15:35:45.784722409 +0000 UTC m=+0.094324703 container init 101cc91253b59d1d8826ffc42e5dc712b06d246879672f31c1c93053e95905b0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, 
org.label-schema.schema-version=1.0, ceph=True, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:35:45.987 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 podman[94885]: 2026-03-09 15:35:45.788078158 +0000 UTC m=+0.097680443 container start 101cc91253b59d1d8826ffc42e5dc712b06d246879672f31c1c93053e95905b0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.vendor=CentOS) 2026-03-09T15:35:45.987 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 podman[94885]: 2026-03-09 15:35:45.70173761 +0000 UTC m=+0.011339904 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:35:45.987 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 ceph-osd[94899]: -- 192.168.123.105:0/346279181 <== mon.0 v2:192.168.123.105:3300/0 4 ==== auth_reply(proto 2 0 (0) Success) ==== 194+0+0 (secure 0 0 0) 0x55b2e4ece960 con 0x55b2e5cb8400 2026-03-09T15:35:45.987 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 bash[94885]: 101cc91253b59d1d8826ffc42e5dc712b06d246879672f31c1c93053e95905b0 2026-03-09T15:35:45.987 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:45 vm05 systemd[1]: Started Ceph osd.2 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
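Editor's note: the osd-2-activate container above performed ceph-volume's lvm activation (prime-osd-dir against the bluestore LV, re-linking /var/lib/ceph/osd/ceph-2/block, chown to ceph:ceph) before the long-running osd.2 container was started from the squid image. A rough manual equivalent is sketched below, assuming ceph-volume is available on the host; the OSD fsid is taken from the osd-block-* LV name in the log, and --no-systemd is used because cephadm, not the host's ceph-osd units, owns the service.

# Sketch only: manual ceph-volume activation matching what the activate container logged.
osd_id=2
osd_fsid=3556c187-377e-47cc-8f72-be4edaa111a4   # from the LV name osd-block-<fsid> above
# Rebuild /var/lib/ceph/osd/ceph-$osd_id from the bluestore label without touching systemd:
ceph-volume lvm activate --no-systemd "$osd_id" "$osd_fsid"
# Internally this runs roughly the commands visible in the log:
#   ceph-bluestore-tool prime-osd-dir --dev <lv> --path /var/lib/ceph/osd/ceph-2 --no-mon-config
#   ln -snf <lv> /var/lib/ceph/osd/ceph-2/block
#   chown -R ceph:ceph /var/lib/ceph/osd/ceph-2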
2026-03-09T15:35:46.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:45 vm09 ceph-mon[77297]: pgmap v51: 161 pgs: 13 stale+active+clean, 148 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:35:46.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:45 vm09 ceph-mon[77297]: osdmap e99: 8 total, 7 up, 8 in 2026-03-09T15:35:46.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:45 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:46.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:45 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:35:46.608 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2[94895]: 2026-03-09T15:35:46.605+0000 7fb3a8e4e740 -1 Falling back to public interface 2026-03-09T15:35:47.196 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:46 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:47.196 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:46 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:47.196 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:46 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:47.196 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:46 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:46 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:35:47.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:46 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:47.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:46 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 
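Editor's note: the recurring CephOSDFlapping / CephNodeDiskspaceWarning messages from prometheus.a are rule-evaluation failures, not firing alerts. During the mixed-version window each OSD (and node) is exported twice, once with the new cluster/instance="ceph_cluster" labels and once with the old scrape labels, so the one-to-one "on (...) group_left" join in the bundled alert rules fails with "many-to-many matching not allowed". The alertmanager errors are a separate, pre-existing issue: its ceph-dashboard webhook points at host.containers.internal, which this VPS DNS setup cannot resolve. A small sketch for confirming both from the monitoring host follows; the host name and port 9095 are assumptions based on where prometheus.a runs in this job and on cephadm's usual Prometheus port, and curl/jq are assumed to be installed.

# Sketch only: show the duplicate metadata series behind the join failures.
curl -s 'http://vm09:9095/api/v1/query' \
     --data-urlencode 'query=ceph_osd_metadata{ceph_daemon="osd.2"}' |
  jq '.data.result[].metric | {instance, cluster, ceph_version}'
# Two series per daemon (instance="ceph_cluster" vs the mgr scrape address) is what
# breaks the alert rules until all exporters are on the same version.
# The alertmanager webhook failures are just unresolved DNS for the receiver host:
getent hosts host.containers.internal || echo "host.containers.internal does not resolve here"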
2026-03-09T15:35:47.940 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:47 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2[94895]: 2026-03-09T15:35:47.501+0000 7fb3a8e4e740 -1 osd.2 0 read_superblock omap replica is missing. 2026-03-09T15:35:47.940 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:47 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2[94895]: 2026-03-09T15:35:47.525+0000 7fb3a8e4e740 -1 osd.2 97 log_to_monitors true 2026-03-09T15:35:47.942 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[86498]: pgmap v53: 161 pgs: 27 active+undersized, 4 stale+active+clean, 8 active+undersized+degraded, 122 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 29/627 objects degraded (4.625%) 2026-03-09T15:35:47.942 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[86498]: Health check failed: Degraded data redundancy: 29/627 objects degraded (4.625%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T15:35:47.942 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:47.942 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:47.942 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[86498]: from='osd.2 [v2:192.168.123.105:6818/1278408392,v1:192.168.123.105:6819/1278408392]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T15:35:47.942 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:47.942 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:48.200 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[88323]: pgmap v53: 161 pgs: 27 active+undersized, 4 stale+active+clean, 8 active+undersized+degraded, 122 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 29/627 objects degraded (4.625%) 2026-03-09T15:35:48.200 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[88323]: Health check failed: Degraded data redundancy: 29/627 objects degraded (4.625%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T15:35:48.200 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:48.200 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:48.200 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[88323]: from='osd.2 [v2:192.168.123.105:6818/1278408392,v1:192.168.123.105:6819/1278408392]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T15:35:48.200 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:48.200 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:47 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:47 vm09 ceph-mon[77297]: pgmap v53: 161 pgs: 27 active+undersized, 4 stale+active+clean, 8 active+undersized+degraded, 122 active+clean; 
457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 29/627 objects degraded (4.625%) 2026-03-09T15:35:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:47 vm09 ceph-mon[77297]: Health check failed: Degraded data redundancy: 29/627 objects degraded (4.625%), 8 pgs degraded (PG_DEGRADED) 2026-03-09T15:35:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:47 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:47 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:47 vm09 ceph-mon[77297]: from='osd.2 [v2:192.168.123.105:6818/1278408392,v1:192.168.123.105:6819/1278408392]' entity='osd.2' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]: dispatch 2026-03-09T15:35:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:47 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:48.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:47 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:49.702 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:35:49 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:35:49.446+0000 7fef4b3b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: pgmap v54: 161 pgs: 34 active+undersized, 12 active+undersized+degraded, 115 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%) 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: from='osd.2 [v2:192.168.123.105:6818/1278408392,v1:192.168.123.105:6819/1278408392]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: osdmap e100: 8 total, 7 up, 8 in 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: from='osd.2 [v2:192.168.123.105:6818/1278408392,v1:192.168.123.105:6819/1278408392]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:49.986 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: pgmap v54: 161 pgs: 34 active+undersized, 12 active+undersized+degraded, 115 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%) 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: from='osd.2 [v2:192.168.123.105:6818/1278408392,v1:192.168.123.105:6819/1278408392]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: osdmap e100: 8 total, 7 up, 8 in 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: from='osd.2 [v2:192.168.123.105:6818/1278408392,v1:192.168.123.105:6819/1278408392]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:35:49.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:49.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: from='mgr.25004 
192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:49.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:49.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: pgmap v54: 161 pgs: 34 active+undersized, 12 active+undersized+degraded, 115 active+clean; 457 KiB data, 142 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%) 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: from='osd.2 [v2:192.168.123.105:6818/1278408392,v1:192.168.123.105:6819/1278408392]' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["2"]}]': finished 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: osdmap e100: 8 total, 7 up, 8 in 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: from='osd.2 [v2:192.168.123.105:6818/1278408392,v1:192.168.123.105:6819/1278408392]' entity='osd.2' cmd=[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:50.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T15:35:50.486 
INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:35:50 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2[94895]: 2026-03-09T15:35:50.064+0000 7fb3a03f8640 -1 osd.2 97 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:35:50.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:50 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T15:35:50.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:50 vm05 ceph-mon[86498]: Upgrade: unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-09T15:35:50.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:50 vm05 ceph-mon[86498]: OSD bench result of 33227.096706 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.2. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T15:35:50.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:50 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T15:35:50.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:50 vm05 ceph-mon[88323]: Upgrade: unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-09T15:35:50.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:50 vm05 ceph-mon[88323]: OSD bench result of 33227.096706 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.2. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T15:35:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:50 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T15:35:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:50 vm09 ceph-mon[77297]: Upgrade: unsafe to stop osd(s) at this time (14 PGs are or would become offline) 2026-03-09T15:35:51.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:50 vm09 ceph-mon[77297]: OSD bench result of 33227.096706 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.2. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T15:35:51.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:51 vm05 ceph-mon[86498]: pgmap v56: 161 pgs: 34 active+undersized, 12 active+undersized+degraded, 115 active+clean; 457 KiB data, 161 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%) 2026-03-09T15:35:51.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:51 vm05 ceph-mon[86498]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:35:51.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:51 vm05 ceph-mon[86498]: osd.2 [v2:192.168.123.105:6818/1278408392,v1:192.168.123.105:6819/1278408392] boot 2026-03-09T15:35:51.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:51 vm05 ceph-mon[86498]: osdmap e101: 8 total, 8 up, 8 in 2026-03-09T15:35:51.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:35:51.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:51 vm05 ceph-mon[88323]: pgmap v56: 161 pgs: 34 active+undersized, 12 active+undersized+degraded, 115 active+clean; 457 KiB data, 161 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%) 2026-03-09T15:35:51.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:51 vm05 ceph-mon[88323]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:35:51.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:51 vm05 ceph-mon[88323]: osd.2 [v2:192.168.123.105:6818/1278408392,v1:192.168.123.105:6819/1278408392] boot 2026-03-09T15:35:51.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:51 vm05 ceph-mon[88323]: osdmap e101: 8 total, 8 up, 8 in 2026-03-09T15:35:51.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:35:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:51 vm09 ceph-mon[77297]: pgmap v56: 161 pgs: 34 active+undersized, 12 active+undersized+degraded, 115 active+clean; 457 KiB data, 161 MiB used, 160 GiB / 160 GiB avail; 55/627 objects degraded (8.772%) 2026-03-09T15:35:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:51 vm09 ceph-mon[77297]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:35:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:51 vm09 ceph-mon[77297]: osd.2 [v2:192.168.123.105:6818/1278408392,v1:192.168.123.105:6819/1278408392] boot 2026-03-09T15:35:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:51 vm09 ceph-mon[77297]: osdmap e101: 8 total, 8 up, 8 in 2026-03-09T15:35:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 2}]: dispatch 2026-03-09T15:35:52.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:52 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:52.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:52 vm05 ceph-mon[86498]: osdmap e102: 8 total, 8 up, 8 in 2026-03-09T15:35:52.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:35:52 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:35:52] "GET /metrics HTTP/1.1" 200 37896 "" "Prometheus/2.51.0" 2026-03-09T15:35:52.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 
09 15:35:52 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:52.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:52 vm05 ceph-mon[88323]: osdmap e102: 8 total, 8 up, 8 in 2026-03-09T15:35:53.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:52 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:35:53.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:52 vm09 ceph-mon[77297]: osdmap e102: 8 total, 8 up, 8 in 2026-03-09T15:35:53.887 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:53 vm05 ceph-mon[86498]: pgmap v59: 161 pgs: 29 peering, 13 active+undersized, 4 active+undersized+degraded, 115 active+clean; 457 KiB data, 161 MiB used, 160 GiB / 160 GiB avail; 22/627 objects degraded (3.509%) 2026-03-09T15:35:53.888 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:53 vm05 ceph-mon[86498]: Health check update: Degraded data redundancy: 22/627 objects degraded (3.509%), 4 pgs degraded (PG_DEGRADED) 2026-03-09T15:35:53.888 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:53 vm05 ceph-mon[88323]: pgmap v59: 161 pgs: 29 peering, 13 active+undersized, 4 active+undersized+degraded, 115 active+clean; 457 KiB data, 161 MiB used, 160 GiB / 160 GiB avail; 22/627 objects degraded (3.509%) 2026-03-09T15:35:53.888 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:53 vm05 ceph-mon[88323]: Health check update: Degraded data redundancy: 22/627 objects degraded (3.509%), 4 pgs degraded (PG_DEGRADED) 2026-03-09T15:35:53.888 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:53.515Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:53.888 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:53.516Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:53.888 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:53.516Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:53.888 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:35:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:35:53.516Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:35:54.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:53 vm09 ceph-mon[77297]: pgmap v59: 161 pgs: 29 peering, 13 active+undersized, 4 active+undersized+degraded, 115 active+clean; 457 KiB data, 161 MiB used, 160 GiB / 160 GiB avail; 22/627 objects degraded (3.509%) 2026-03-09T15:35:54.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:53 vm09 ceph-mon[77297]: Health check update: Degraded data redundancy: 22/627 objects degraded (3.509%), 4 pgs degraded (PG_DEGRADED) 2026-03-09T15:35:54.511 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:35:54.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:54 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:54.147Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.2\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-09T15:35:54.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:54 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:54.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.2\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.2\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:35:54.794 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:54 vm05 ceph-mon[86498]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 22/627 objects degraded (3.509%), 4 pgs degraded) 2026-03-09T15:35:54.794 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:54 vm05 ceph-mon[86498]: Cluster is now healthy 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (2m) 7s ago 7m 24.9M - 0.25.0 c8568f914cd2 93224b6bb99a 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (109s) 67s ago 6m 42.6M - dad864ee21e9 6a58314a043e 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 running (97s) 7s ago 6m 50.3M - 3.5 e1d6a67b021e 3a5f40e66729 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443,9283,8765 running (94s) 67s ago 8m 487M - 19.2.3-678-ge911bdeb 654f31e6858e dd2d7e10f3aa 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:8443,9283,8765 running (107s) 7s ago 9m 553M - 19.2.3-678-ge911bdeb 654f31e6858e db0211ba824d 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (89s) 7s ago 9m 46.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3fa7c78f8952 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (73s) 67s ago 8m 21.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 60013cd0d65b 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (78s) 7s ago 8m 37.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c4256ae4b3f9 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (2m) 7s ago 7m 9629k - 1.7.0 72c9c2088986 e730a028339f 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (116s) 67s ago 7m 9202k - 1.7.0 72c9c2088986 a360ac0679f4 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (55s) 7s ago 8m 70.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 343a65bb3f01 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (33s) 7s ago 8m 69.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 
379185d73d4e 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (9s) 7s ago 8m 14.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 101cc91253b5 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (8m) 7s ago 8m 57.1M 4096M 17.2.0 e1d6a67b021e b4398847e195 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (7m) 67s ago 7m 52.4M 4096M 17.2.0 e1d6a67b021e 00685022776e 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (7m) 67s ago 7m 50.4M 4096M 17.2.0 e1d6a67b021e fbdec571623e 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (7m) 67s ago 7m 51.1M 4096M 17.2.0 e1d6a67b021e ad01856a3458 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (7m) 67s ago 7m 51.4M 4096M 17.2.0 e1d6a67b021e 3cc4727e5f07 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (96s) 67s ago 7m 42.6M - 2.51.0 1d3b7f56885b 737f11649a72 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (6m) 7s ago 6m 92.9M - 17.2.0 e1d6a67b021e 1d93c894d675 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (6m) 67s ago 6m 91.7M - 17.2.0 e1d6a67b021e 5c4755297ad0 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (6m) 7s ago 6m 94.1M - 17.2.0 e1d6a67b021e fe5812c6c74c 2026-03-09T15:35:54.924 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (6m) 67s ago 6m 90.7M - 17.2.0 e1d6a67b021e 7a967179b651 2026-03-09T15:35:55.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:54 vm09 ceph-mon[77297]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 22/627 objects degraded (3.509%), 4 pgs degraded) 2026-03-09T15:35:55.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:54 vm09 ceph-mon[77297]: Cluster is now healthy 2026-03-09T15:35:55.152 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:54 vm05 ceph-mon[88323]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 22/627 objects degraded (3.509%), 4 pgs degraded) 2026-03-09T15:35:55.152 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:54 vm05 ceph-mon[88323]: Cluster is now healthy 2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout: "mon": { 2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout: "mgr": { 2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout: "osd": { 2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 5, 2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout: }, 
2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": { 2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-09T15:35:55.153 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:35:55.154 INFO:teuthology.orchestra.run.vm05.stdout: "overall": { 2026-03-09T15:35:55.154 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 9, 2026-03-09T15:35:55.154 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-09T15:35:55.154 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:35:55.154 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:35:55.350 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:35:55.350 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T15:35:55.350 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true, 2026-03-09T15:35:55.350 INFO:teuthology.orchestra.run.vm05.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-09T15:35:55.350 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [ 2026-03-09T15:35:55.350 INFO:teuthology.orchestra.run.vm05.stdout: "mgr", 2026-03-09T15:35:55.350 INFO:teuthology.orchestra.run.vm05.stdout: "mon" 2026-03-09T15:35:55.350 INFO:teuthology.orchestra.run.vm05.stdout: ], 2026-03-09T15:35:55.350 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "8/23 daemons upgraded", 2026-03-09T15:35:55.350 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Currently upgrading osd daemons", 2026-03-09T15:35:55.350 INFO:teuthology.orchestra.run.vm05.stdout: "is_paused": false 2026-03-09T15:35:55.350 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:35:55.580 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_OK 2026-03-09T15:35:55.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:55 vm05 ceph-mon[88323]: pgmap v60: 161 pgs: 29 peering, 132 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:35:55.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:55 vm05 ceph-mon[88323]: from='client.44167 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:55.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:55 vm05 ceph-mon[88323]: from='client.34194 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:55.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:55 vm05 ceph-mon[88323]: from='client.34197 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:55.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:55 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/674555064' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:55.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:55 vm05 ceph-mon[88323]: from='client.? 
192.168.123.105:0/2732241107' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:35:55.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:55 vm05 ceph-mon[86498]: pgmap v60: 161 pgs: 29 peering, 132 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:35:55.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:55 vm05 ceph-mon[86498]: from='client.44167 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:55.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:55 vm05 ceph-mon[86498]: from='client.34194 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:55.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:55 vm05 ceph-mon[86498]: from='client.34197 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:55.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:55 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/674555064' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:55.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:55 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/2732241107' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:35:56.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:55 vm09 ceph-mon[77297]: pgmap v60: 161 pgs: 29 peering, 132 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:35:56.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:55 vm09 ceph-mon[77297]: from='client.44167 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:56.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:55 vm09 ceph-mon[77297]: from='client.34194 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:56.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:55 vm09 ceph-mon[77297]: from='client.34197 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:56.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:55 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/674555064' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:35:56.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:55 vm09 ceph-mon[77297]: from='client.? 
192.168.123.105:0/2732241107' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:35:57.062 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:35:56 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:35:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:35:57.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:56 vm09 ceph-mon[77297]: from='client.34206 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:57.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:56 vm05 ceph-mon[86498]: from='client.34206 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:57.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:56 vm05 ceph-mon[88323]: from='client.34206 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:35:58.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:57 vm09 ceph-mon[77297]: pgmap v61: 161 pgs: 3 peering, 158 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 662 B/s rd, 0 op/s 2026-03-09T15:35:58.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:57 vm05 ceph-mon[86498]: pgmap v61: 161 pgs: 3 peering, 158 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 662 B/s rd, 0 op/s 2026-03-09T15:35:58.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:57 vm05 ceph-mon[88323]: pgmap v61: 161 pgs: 3 peering, 158 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 662 B/s rd, 0 op/s 2026-03-09T15:36:00.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:35:59 vm05 ceph-mon[86498]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:36:00.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:35:59 vm05 ceph-mon[88323]: pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:36:00.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:35:59 vm09 ceph-mon[77297]: 
pgmap v62: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:36:01.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:01 vm09 ceph-mon[77297]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T15:36:01.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:01 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:01.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:01 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:36:01.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:01 vm05 ceph-mon[86498]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T15:36:01.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:01 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:01.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:01 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:36:01.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:01 vm05 ceph-mon[88323]: pgmap v63: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T15:36:01.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:01 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:01.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:01 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:36:02.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:02 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:02.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:02 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:02.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:36:02 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:36:02] "GET /metrics HTTP/1.1" 200 37918 "" "Prometheus/2.51.0" 2026-03-09T15:36:02.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:02 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:03.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:03 vm09 ceph-mon[77297]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 951 B/s rd, 0 op/s 2026-03-09T15:36:03.916 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:03 vm05 ceph-mon[86498]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 951 B/s rd, 0 op/s 2026-03-09T15:36:03.916 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:03.516Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 
err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:03.916 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:03.516Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:03.916 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:03.516Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:03.916 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:03.516Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:03.916 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:03 vm05 ceph-mon[88323]: pgmap v64: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 951 B/s rd, 0 op/s 2026-03-09T15:36:04.530 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:04.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.3\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.3\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.3\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:36:04.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:04 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T15:36:04.949 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:04 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T15:36:04.949 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:04 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T15:36:05.736 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:05 vm05 systemd[1]: Stopping Ceph osd.3 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:36:05.736 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:05 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3[65656]: 2026-03-09T15:36:05.627+0000 7f2095d84700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:36:05.736 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:05 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3[65656]: 2026-03-09T15:36:05.627+0000 7f2095d84700 -1 osd.3 102 *** Got signal Terminated *** 2026-03-09T15:36:05.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:05 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3[65656]: 2026-03-09T15:36:05.627+0000 7f2095d84700 -1 osd.3 102 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:36:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:05 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T15:36:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:05 vm09 ceph-mon[77297]: Upgrade: osd.3 is safe to restart 2026-03-09T15:36:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:05 vm09 ceph-mon[77297]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:36:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:05 vm09 ceph-mon[77297]: Upgrade: Updating osd.3 2026-03-09T15:36:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:05 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:05 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T15:36:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:05 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:05 vm09 ceph-mon[77297]: Deploying daemon osd.3 on vm05 2026-03-09T15:36:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:05 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:06.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:05 vm09 ceph-mon[77297]: osd.3 marked itself down and dead 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[86498]: Upgrade: osd.3 is safe to restart 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[86498]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[86498]: Upgrade: Updating osd.3 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[86498]: Deploying daemon osd.3 on vm05 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[86498]: osd.3 marked itself down and dead 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["3"], "max": 16}]: dispatch 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[88323]: Upgrade: osd.3 is safe to restart 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[88323]: pgmap v65: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[88323]: Upgrade: Updating osd.3 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.3"}]: dispatch 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[88323]: Deploying daemon osd.3 on vm05 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:06.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:05 vm05 ceph-mon[88323]: osd.3 marked itself down and dead 2026-03-09T15:36:06.870 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:06 vm05 ceph-mon[86498]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:36:06.870 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:06 vm05 ceph-mon[86498]: osdmap e103: 8 total, 7 up, 8 in 2026-03-09T15:36:06.871 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:06 vm05 ceph-mon[88323]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:36:06.871 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:06 vm05 ceph-mon[88323]: osdmap e103: 8 total, 7 up, 8 in 2026-03-09T15:36:06.871 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:06 vm05 podman[96545]: 2026-03-09 15:36:06.609876039 +0000 UTC m=+1.017331841 container died b4398847e1954eed91a61324aaf3c34b452038001dfdf9791fdd5b42ec1050f2 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, maintainer=Guillaume Abrioux , version=8, architecture=x86_64, GIT_BRANCH=HEAD, io.k8s.display-name=CentOS Stream 8, vcs-type=git, vendor=Red Hat, Inc., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.buildah.version=1.19.8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, release=754, com.redhat.component=centos-stream-container, io.openshift.expose-services=, ceph=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base centos centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, distribution-scope=public, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, GIT_CLEAN=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=centos-stream, RELEASE=HEAD, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, CEPH_POINT_RELEASE=-17.2.0, build-date=2022-05-03T08:36:31.336870) 2026-03-09T15:36:06.871 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:06 vm05 podman[96545]: 2026-03-09 15:36:06.635243126 +0000 UTC m=+1.042698928 container remove b4398847e1954eed91a61324aaf3c34b452038001dfdf9791fdd5b42ec1050f2 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3, ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.display-name=CentOS Stream 8, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, maintainer=Guillaume Abrioux , GIT_REPO=https://github.com/ceph/ceph-container.git, io.buildah.version=1.19.8, RELEASE=HEAD, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, version=8, distribution-scope=public, GIT_BRANCH=HEAD, GIT_CLEAN=True, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, release=754, io.openshift.expose-services=, vendor=Red Hat, Inc., architecture=x86_64, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.tags=base centos centos-stream, build-date=2022-05-03T08:36:31.336870, com.redhat.component=centos-stream-container, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, name=centos-stream, vcs-type=git, CEPH_POINT_RELEASE=-17.2.0, com.redhat.license_terms=https://centos.org/legal/licensing-policy/) 2026-03-09T15:36:06.871 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:06 vm05 bash[96545]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3 2026-03-09T15:36:06.871 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:06 vm05 podman[96610]: 2026-03-09 15:36:06.801594655 +0000 UTC m=+0.041644954 container create 3c8205404c92b58fac5f7144d46be3eec24e3ff9cea32b92027ccfd637ff7357 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-deactivate, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:36:06.871 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:06 vm05 podman[96610]: 2026-03-09 15:36:06.848204071 +0000 UTC m=+0.088254370 container init 3c8205404c92b58fac5f7144d46be3eec24e3ff9cea32b92027ccfd637ff7357 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-deactivate, org.label-schema.vendor=CentOS, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:36:06.871 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:06 vm05 podman[96610]: 2026-03-09 15:36:06.851122023 +0000 UTC m=+0.091172312 container start 3c8205404c92b58fac5f7144d46be3eec24e3ff9cea32b92027ccfd637ff7357 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-deactivate, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.authors=Ceph Release Team , 
CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.license=GPLv2) 2026-03-09T15:36:06.871 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:06 vm05 podman[96610]: 2026-03-09 15:36:06.857744708 +0000 UTC m=+0.097795017 container attach 3c8205404c92b58fac5f7144d46be3eec24e3ff9cea32b92027ccfd637ff7357 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-deactivate, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, ceph=True) 2026-03-09T15:36:06.871 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:06 vm05 podman[96610]: 2026-03-09 15:36:06.770542537 +0000 UTC m=+0.010592847 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:36:07.062 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:36:07.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:06 vm09 ceph-mon[77297]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:36:07.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:06 vm09 ceph-mon[77297]: osdmap e103: 8 total, 7 up, 8 in 2026-03-09T15:36:07.218 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:06 vm05 podman[96610]: 2026-03-09 15:36:06.993439708 +0000 UTC m=+0.233490007 container died 
3c8205404c92b58fac5f7144d46be3eec24e3ff9cea32b92027ccfd637ff7357 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, ceph=True, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0) 2026-03-09T15:36:07.218 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 podman[96610]: 2026-03-09 15:36:07.011579921 +0000 UTC m=+0.251630220 container remove 3c8205404c92b58fac5f7144d46be3eec24e3ff9cea32b92027ccfd637ff7357 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-deactivate, org.label-schema.schema-version=1.0, CEPH_REF=squid, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:36:07.218 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.3.service: Deactivated successfully. 2026-03-09T15:36:07.218 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 systemd[1]: Stopped Ceph osd.3 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:36:07.218 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.3.service: Consumed 23.879s CPU time. 2026-03-09T15:36:07.218 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 systemd[1]: Starting Ceph osd.3 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:36:07.486 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 podman[96713]: 2026-03-09 15:36:07.322390412 +0000 UTC m=+0.019782306 container create c543a7ce3e723dc8f33cfdd9b82555584e3d3815022106091ad2a8bdbd90742e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:36:07.486 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 podman[96713]: 2026-03-09 15:36:07.36602498 +0000 UTC m=+0.063416874 container init c543a7ce3e723dc8f33cfdd9b82555584e3d3815022106091ad2a8bdbd90742e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid) 2026-03-09T15:36:07.486 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 podman[96713]: 2026-03-09 15:36:07.368854236 +0000 UTC m=+0.066246120 container start c543a7ce3e723dc8f33cfdd9b82555584e3d3815022106091ad2a8bdbd90742e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T15:36:07.487 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 podman[96713]: 2026-03-09 15:36:07.373721184 +0000 UTC m=+0.071113078 container attach c543a7ce3e723dc8f33cfdd9b82555584e3d3815022106091ad2a8bdbd90742e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0) 2026-03-09T15:36:07.487 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 podman[96713]: 2026-03-09 15:36:07.314428141 +0000 UTC m=+0.011820044 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:36:07.487 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate[96724]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:07.487 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 bash[96713]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:07.487 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate[96724]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:07.487 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 bash[96713]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:08.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:07 vm05 ceph-mon[86498]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:36:08.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:07 vm05 ceph-mon[86498]: osdmap e104: 8 total, 7 up, 8 in 2026-03-09T15:36:08.236 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate[96724]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:36:08.236 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate[96724]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:08.236 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 bash[96713]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:36:08.236 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 bash[96713]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:08.236 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate[96724]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:08.237 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 bash[96713]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:08.237 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate[96724]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-09T15:36:08.237 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 bash[96713]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-09T15:36:08.237 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate[96724]: Running command: /usr/bin/ceph-bluestore-tool 
--cluster=ceph prime-osd-dir --dev /dev/ceph-313253ad-44f5-4298-acac-15efa7e1dea3/osd-block-fe8044ac-8ad4-4057-a7f5-e8e77db769c3 --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-09T15:36:08.237 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:07 vm05 bash[96713]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-313253ad-44f5-4298-acac-15efa7e1dea3/osd-block-fe8044ac-8ad4-4057-a7f5-e8e77db769c3 --path /var/lib/ceph/osd/ceph-3 --no-mon-config 2026-03-09T15:36:08.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:07 vm05 ceph-mon[88323]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:36:08.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:07 vm05 ceph-mon[88323]: osdmap e104: 8 total, 7 up, 8 in 2026-03-09T15:36:08.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:07 vm09 ceph-mon[77297]: pgmap v66: 161 pgs: 161 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:36:08.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:07 vm09 ceph-mon[77297]: osdmap e104: 8 total, 7 up, 8 in 2026-03-09T15:36:08.736 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate[96724]: Running command: /usr/bin/ln -snf /dev/ceph-313253ad-44f5-4298-acac-15efa7e1dea3/osd-block-fe8044ac-8ad4-4057-a7f5-e8e77db769c3 /var/lib/ceph/osd/ceph-3/block 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 bash[96713]: Running command: /usr/bin/ln -snf /dev/ceph-313253ad-44f5-4298-acac-15efa7e1dea3/osd-block-fe8044ac-8ad4-4057-a7f5-e8e77db769c3 /var/lib/ceph/osd/ceph-3/block 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate[96724]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 bash[96713]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate[96724]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 bash[96713]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate[96724]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 bash[96713]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate[96724]: --> ceph-volume lvm activate successful for osd ID: 3 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 bash[96713]: --> ceph-volume lvm activate successful for osd ID: 3 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 podman[96713]: 2026-03-09 15:36:08.294929657 +0000 UTC m=+0.992321561 container died c543a7ce3e723dc8f33cfdd9b82555584e3d3815022106091ad2a8bdbd90742e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 podman[96713]: 2026-03-09 15:36:08.314516506 +0000 UTC m=+1.011908400 container remove c543a7ce3e723dc8f33cfdd9b82555584e3d3815022106091ad2a8bdbd90742e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-activate, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0) 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 podman[96960]: 2026-03-09 15:36:08.40727214 +0000 UTC m=+0.017050974 container create 7796c013c7ba6af2769d6c7596349134cd8ee02ac202ebfc11fdc0fb9f6e705f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 podman[96960]: 2026-03-09 15:36:08.437751967 +0000 UTC m=+0.047530801 container init 7796c013c7ba6af2769d6c7596349134cd8ee02ac202ebfc11fdc0fb9f6e705f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, 
io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default) 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 podman[96960]: 2026-03-09 15:36:08.444901317 +0000 UTC m=+0.054680151 container start 7796c013c7ba6af2769d6c7596349134cd8ee02ac202ebfc11fdc0fb9f6e705f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_REF=squid) 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 bash[96960]: 7796c013c7ba6af2769d6c7596349134cd8ee02ac202ebfc11fdc0fb9f6e705f 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 podman[96960]: 2026-03-09 15:36:08.399946609 +0000 UTC m=+0.009725443 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:36:08.737 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 systemd[1]: Started Ceph osd.3 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:36:09.210 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3[96970]: 2026-03-09T15:36:08.771+0000 7f67faf01740 -1 Falling back to public interface 2026-03-09T15:36:09.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:09 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:09.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:09 vm05 ceph-mon[86498]: pgmap v69: 161 pgs: 9 active+undersized, 21 stale+active+clean, 5 active+undersized+degraded, 126 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 18/627 objects degraded (2.871%) 2026-03-09T15:36:09.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:09 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:09.736 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:09 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3[96970]: 2026-03-09T15:36:09.624+0000 7f67faf01740 -1 osd.3 0 read_superblock omap replica is missing. 
2026-03-09T15:36:09.736 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:09 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3[96970]: 2026-03-09T15:36:09.672+0000 7f67faf01740 -1 osd.3 102 log_to_monitors true 2026-03-09T15:36:09.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:09 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:09.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:09 vm05 ceph-mon[88323]: pgmap v69: 161 pgs: 9 active+undersized, 21 stale+active+clean, 5 active+undersized+degraded, 126 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 18/627 objects degraded (2.871%) 2026-03-09T15:36:09.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:09 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:09.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:09 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:09.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:09 vm09 ceph-mon[77297]: pgmap v69: 161 pgs: 9 active+undersized, 21 stale+active+clean, 5 active+undersized+degraded, 126 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s; 18/627 objects degraded (2.871%) 2026-03-09T15:36:09.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:09 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:10.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:10 vm05 ceph-mon[86498]: Health check failed: Degraded data redundancy: 18/627 objects degraded (2.871%), 5 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:10.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:10 vm05 ceph-mon[86498]: from='osd.3 [v2:192.168.123.105:6826/3277861444,v1:192.168.123.105:6827/3277861444]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T15:36:10.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:10 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:10.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:10 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:10.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:10 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:10.737 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:10 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:10.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:10 vm05 ceph-mon[88323]: Health check failed: Degraded data redundancy: 18/627 objects degraded (2.871%), 5 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:10.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:10 vm05 ceph-mon[88323]: from='osd.3 [v2:192.168.123.105:6826/3277861444,v1:192.168.123.105:6827/3277861444]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T15:36:10.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:10 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:10.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:10 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:10.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:10 
vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:10.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:10 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:10.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:10 vm09 ceph-mon[77297]: Health check failed: Degraded data redundancy: 18/627 objects degraded (2.871%), 5 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:10.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:10 vm09 ceph-mon[77297]: from='osd.3 [v2:192.168.123.105:6826/3277861444,v1:192.168.123.105:6827/3277861444]' entity='osd.3' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]: dispatch 2026-03-09T15:36:10.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:10 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:10.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:10 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:10.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:10 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:10.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:10 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:11.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:11 vm05 ceph-mon[86498]: pgmap v70: 161 pgs: 14 active+undersized, 20 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 20/627 objects degraded (3.190%) 2026-03-09T15:36:11.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:11 vm05 ceph-mon[86498]: from='osd.3 [v2:192.168.123.105:6826/3277861444,v1:192.168.123.105:6827/3277861444]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T15:36:11.637 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:11 vm05 ceph-mon[86498]: osdmap e105: 8 total, 7 up, 8 in 2026-03-09T15:36:11.638 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:11 vm05 ceph-mon[86498]: from='osd.3 [v2:192.168.123.105:6826/3277861444,v1:192.168.123.105:6827/3277861444]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:36:11.638 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:36:11 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3[96970]: 2026-03-09T15:36:11.383+0000 7f67f24ab640 -1 osd.3 102 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:36:11.639 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:11 vm05 ceph-mon[88323]: pgmap v70: 161 pgs: 14 active+undersized, 20 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 20/627 objects degraded (3.190%) 2026-03-09T15:36:11.639 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:11 vm05 ceph-mon[88323]: from='osd.3 [v2:192.168.123.105:6826/3277861444,v1:192.168.123.105:6827/3277861444]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T15:36:11.639 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:11 vm05 ceph-mon[88323]: osdmap e105: 8 total, 7 up, 8 in 2026-03-09T15:36:11.639 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:11 vm05 ceph-mon[88323]: from='osd.3 [v2:192.168.123.105:6826/3277861444,v1:192.168.123.105:6827/3277861444]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:36:11.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:11 vm09 ceph-mon[77297]: pgmap v70: 161 pgs: 14 active+undersized, 20 stale+active+clean, 6 active+undersized+degraded, 121 active+clean; 457 KiB data, 162 MiB used, 160 GiB / 160 GiB avail; 511 B/s rd, 0 op/s; 20/627 objects degraded (3.190%) 2026-03-09T15:36:11.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:11 vm09 ceph-mon[77297]: from='osd.3 [v2:192.168.123.105:6826/3277861444,v1:192.168.123.105:6827/3277861444]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["3"]}]': finished 2026-03-09T15:36:11.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:11 vm09 ceph-mon[77297]: osdmap e105: 8 total, 7 up, 8 in 2026-03-09T15:36:11.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:11 vm09 ceph-mon[77297]: from='osd.3 [v2:192.168.123.105:6826/3277861444,v1:192.168.123.105:6827/3277861444]' entity='osd.3' cmd=[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm05", "root=default"]}]: dispatch 2026-03-09T15:36:11.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:36:11 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:36:11.635+0000 7fef4b3b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T15:36:12.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:36:12.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: osd.3 [v2:192.168.123.105:6826/3277861444,v1:192.168.123.105:6827/3277861444] boot 2026-03-09T15:36:12.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: osdmap e106: 8 total, 8 up, 8 in 2026-03-09T15:36:12.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:36:12.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:12.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:12.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:12.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:36:12.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:12.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:12.813 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:36:12.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:12.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:12.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:12.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T15:36:12.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T15:36:12.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T15:36:12.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:12 vm09 ceph-mon[77297]: osdmap e107: 8 total, 8 up, 8 in 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: osd.3 [v2:192.168.123.105:6826/3277861444,v1:192.168.123.105:6827/3277861444] boot 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: osdmap e106: 8 total, 8 up, 8 in 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 
cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[86498]: osdmap e107: 8 total, 8 up, 8 in 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:36:12 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:36:12] "GET /metrics HTTP/1.1" 200 37918 "" "Prometheus/2.51.0" 2026-03-09T15:36:12.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: osd.3 [v2:192.168.123.105:6826/3277861444,v1:192.168.123.105:6827/3277861444] boot 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: osdmap e106: 8 total, 8 up, 8 in 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 3}]: dispatch 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:12.987 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T15:36:12.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:12 vm05 ceph-mon[88323]: osdmap e107: 8 total, 8 up, 8 in 2026-03-09T15:36:13.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:13 vm09 ceph-mon[77297]: OSD bench result of 27614.621390 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T15:36:13.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:13 vm09 ceph-mon[77297]: pgmap v73: 161 pgs: 41 active+undersized, 25 active+undersized+degraded, 95 active+clean; 457 KiB data, 181 MiB used, 160 GiB / 160 GiB avail; 82/627 objects degraded (13.078%) 2026-03-09T15:36:13.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:13 vm05 ceph-mon[86498]: OSD bench result of 27614.621390 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T15:36:13.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:13 vm05 ceph-mon[86498]: pgmap v73: 161 pgs: 41 active+undersized, 25 active+undersized+degraded, 95 active+clean; 457 KiB data, 181 MiB used, 160 GiB / 160 GiB avail; 82/627 objects degraded (13.078%) 2026-03-09T15:36:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:13.516Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:13.516Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:13.517Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:13.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:13.517Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:13.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:13 vm05 ceph-mon[88323]: OSD bench result of 27614.621390 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.3. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T15:36:13.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:13 vm05 ceph-mon[88323]: pgmap v73: 161 pgs: 41 active+undersized, 25 active+undersized+degraded, 95 active+clean; 457 KiB data, 181 MiB used, 160 GiB / 160 GiB avail; 82/627 objects degraded (13.078%) 2026-03-09T15:36:14.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.3\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.3\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.3\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:36:14.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:14 vm05 ceph-mon[86498]: Health check update: Degraded data redundancy: 70/627 objects degraded (11.164%), 21 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:14.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:14 vm05 ceph-mon[88323]: Health check update: Degraded data redundancy: 70/627 objects degraded (11.164%), 21 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:15.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:14 vm09 ceph-mon[77297]: Health check update: Degraded data redundancy: 70/627 objects degraded (11.164%), 21 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:15.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:15 vm05 ceph-mon[86498]: pgmap v75: 161 pgs: 32 active+undersized, 21 active+undersized+degraded, 108 active+clean; 457 KiB data, 181 MiB used, 160 GiB / 160 GiB avail; 70/627 objects degraded (11.164%) 2026-03-09T15:36:15.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:15 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:15.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:15 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' 
entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:36:15.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:15 vm05 ceph-mon[88323]: pgmap v75: 161 pgs: 32 active+undersized, 21 active+undersized+degraded, 108 active+clean; 457 KiB data, 181 MiB used, 160 GiB / 160 GiB avail; 70/627 objects degraded (11.164%) 2026-03-09T15:36:15.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:15 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:15.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:15 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:36:16.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:15 vm09 ceph-mon[77297]: pgmap v75: 161 pgs: 32 active+undersized, 21 active+undersized+degraded, 108 active+clean; 457 KiB data, 181 MiB used, 160 GiB / 160 GiB avail; 70/627 objects degraded (11.164%) 2026-03-09T15:36:16.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:15 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:16.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:15 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:36:16.950 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:16 vm09 ceph-mon[77297]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 70/627 objects degraded (11.164%), 21 pgs degraded) 2026-03-09T15:36:16.950 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:16 vm09 ceph-mon[77297]: Cluster is now healthy 2026-03-09T15:36:16.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:16 vm05 ceph-mon[86498]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 70/627 objects degraded (11.164%), 21 pgs degraded) 2026-03-09T15:36:16.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:16 vm05 ceph-mon[86498]: Cluster is now healthy 2026-03-09T15:36:16.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:16 vm05 ceph-mon[88323]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 70/627 objects degraded (11.164%), 21 pgs degraded) 2026-03-09T15:36:16.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:16 vm05 ceph-mon[88323]: Cluster is now healthy 2026-03-09T15:36:17.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", 
machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:36:17.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:17 vm05 ceph-mon[86498]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 582 MiB used, 159 GiB / 160 GiB avail 2026-03-09T15:36:17.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:17 vm05 ceph-mon[88323]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 582 MiB used, 159 GiB / 160 GiB avail 2026-03-09T15:36:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:17 vm09 ceph-mon[77297]: pgmap v76: 161 pgs: 161 active+clean; 457 KiB data, 582 MiB used, 159 GiB / 160 GiB avail 2026-03-09T15:36:19.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:19 vm05 ceph-mon[86498]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 582 MiB used, 159 GiB / 160 GiB avail 2026-03-09T15:36:19.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:19 vm05 ceph-mon[88323]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 582 MiB used, 159 GiB / 160 GiB avail 2026-03-09T15:36:20.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:19 vm09 ceph-mon[77297]: pgmap v77: 161 pgs: 161 active+clean; 457 KiB data, 582 MiB used, 159 GiB / 160 GiB avail 2026-03-09T15:36:21.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:21 vm05 ceph-mon[86498]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 582 MiB used, 159 GiB / 160 GiB avail 2026-03-09T15:36:21.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:21 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:21.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:21 vm05 ceph-mon[88323]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 582 MiB used, 159 GiB / 160 GiB avail 2026-03-09T15:36:21.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:21 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:22.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:21 vm09 ceph-mon[77297]: pgmap v78: 161 pgs: 161 active+clean; 457 KiB data, 582 MiB used, 159 GiB / 160 GiB avail 2026-03-09T15:36:22.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:21 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:22.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:36:22 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:36:22] "GET /metrics HTTP/1.1" 200 37843 "" "Prometheus/2.51.0" 2026-03-09T15:36:22.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:22 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:22.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:22 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:23.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:22 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' 
cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:23.887 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:23 vm05 ceph-mon[86498]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 409 B/s rd, 0 op/s 2026-03-09T15:36:23.887 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:23.517Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:23.887 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:23.517Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:23.887 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:23.518Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:23.887 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:23.518Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:23.887 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:23 vm05 ceph-mon[88323]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 409 B/s rd, 0 op/s 2026-03-09T15:36:24.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:23 vm09 ceph-mon[77297]: pgmap v79: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 409 B/s rd, 0 op/s 2026-03-09T15:36:24.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:24.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. 
This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.4\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:36:25.812 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:25 vm05 ceph-mon[86498]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-09T15:36:25.812 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:25 vm05 ceph-mon[88323]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-09T15:36:25.812 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:36:26.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:25 vm09 ceph-mon[77297]: pgmap v80: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (2m) 16s ago 7m 24.9M - 0.25.0 c8568f914cd2 93224b6bb99a 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (2m) 98s ago 7m 42.6M - dad864ee21e9 6a58314a043e 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 running (2m) 16s ago 7m 50.4M - 3.5 e1d6a67b021e 3a5f40e66729 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443,9283,8765 running (2m) 98s ago 9m 487M - 19.2.3-678-ge911bdeb 654f31e6858e dd2d7e10f3aa 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:8443,9283,8765 running (2m) 16s ago 9m 554M - 19.2.3-678-ge911bdeb 654f31e6858e db0211ba824d 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (2m) 16s ago 10m 50.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3fa7c78f8952 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (105s) 98s ago 9m 21.5M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 60013cd0d65b 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (110s) 16s ago 9m 40.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c4256ae4b3f9 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (2m) 16s 
ago 7m 9763k - 1.7.0 72c9c2088986 e730a028339f 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (2m) 98s ago 7m 9202k - 1.7.0 72c9c2088986 a360ac0679f4 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (87s) 16s ago 9m 70.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 343a65bb3f01 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (64s) 16s ago 8m 71.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 379185d73d4e 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (40s) 16s ago 8m 66.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 101cc91253b5 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (17s) 16s ago 8m 13.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 7796c013c7ba 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (8m) 98s ago 8m 52.4M 4096M 17.2.0 e1d6a67b021e 00685022776e 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (8m) 98s ago 8m 50.4M 4096M 17.2.0 e1d6a67b021e fbdec571623e 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (8m) 98s ago 8m 51.1M 4096M 17.2.0 e1d6a67b021e ad01856a3458 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (8m) 98s ago 8m 51.4M 4096M 17.2.0 e1d6a67b021e 3cc4727e5f07 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (2m) 98s ago 7m 42.6M - 2.51.0 1d3b7f56885b 737f11649a72 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (7m) 16s ago 7m 93.0M - 17.2.0 e1d6a67b021e 1d93c894d675 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (7m) 98s ago 7m 91.7M - 17.2.0 e1d6a67b021e 5c4755297ad0 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (7m) 16s ago 7m 94.3M - 17.2.0 e1d6a67b021e fe5812c6c74c 2026-03-09T15:36:26.214 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (7m) 98s ago 7m 90.7M - 17.2.0 e1d6a67b021e 7a967179b651 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: "mon": { 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: "mgr": { 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: "osd": { 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4, 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 4 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": { 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 
(43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: "overall": { 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 8, 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 9 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:36:26.459 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:36:26.677 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:36:26.677 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T15:36:26.677 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true, 2026-03-09T15:36:26.677 INFO:teuthology.orchestra.run.vm05.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-09T15:36:26.677 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [ 2026-03-09T15:36:26.677 INFO:teuthology.orchestra.run.vm05.stdout: "mgr", 2026-03-09T15:36:26.677 INFO:teuthology.orchestra.run.vm05.stdout: "mon" 2026-03-09T15:36:26.677 INFO:teuthology.orchestra.run.vm05.stdout: ], 2026-03-09T15:36:26.677 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "9/23 daemons upgraded", 2026-03-09T15:36:26.677 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Currently upgrading osd daemons", 2026-03-09T15:36:26.677 INFO:teuthology.orchestra.run.vm05.stdout: "is_paused": false 2026-03-09T15:36:26.677 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:36:26.886 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:26 vm09 ceph-mon[77297]: from='client.44197 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:26.886 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:26 vm09 ceph-mon[77297]: from='client.34230 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:26.886 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:26 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/1120763938' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:26.930 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_OK 2026-03-09T15:36:26.930 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:26 vm05 ceph-mon[86498]: from='client.44197 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:26.930 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:26 vm05 ceph-mon[86498]: from='client.34230 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:26.930 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:26 vm05 ceph-mon[86498]: from='client.? 
192.168.123.105:0/1120763938' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:26.930 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:26 vm05 ceph-mon[88323]: from='client.44197 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:26.930 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:26 vm05 ceph-mon[88323]: from='client.34230 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:26.930 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:26 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/1120763938' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:27.164 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:26 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:26.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:36:27.698 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:27 vm09 ceph-mon[77297]: from='client.44206 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:27.698 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:27 vm09 ceph-mon[77297]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T15:36:27.698 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T15:36:27.698 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:27 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T15:36:27.698 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:27 vm09 ceph-mon[77297]: Upgrade: osd.4 is safe to restart 2026-03-09T15:36:27.698 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:27 vm09 ceph-mon[77297]: from='client.44198 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:27.698 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:27 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/4057099469' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:36:27.698 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:27 vm09 ceph-mon[77297]: Upgrade: Updating osd.4 2026-03-09T15:36:27.698 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:27.698 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T15:36:27.698 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:27 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:27.698 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:27 vm09 ceph-mon[77297]: Deploying daemon osd.4 on vm09 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[86498]: from='client.44206 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[86498]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[86498]: Upgrade: osd.4 is safe to restart 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[86498]: from='client.44198 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[86498]: from='client.? 
192.168.123.105:0/4057099469' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[86498]: Upgrade: Updating osd.4 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[86498]: Deploying daemon osd.4 on vm09 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[88323]: from='client.44206 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[88323]: pgmap v81: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 767 B/s rd, 0 op/s 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["4"], "max": 16}]: dispatch 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[88323]: Upgrade: osd.4 is safe to restart 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[88323]: from='client.44198 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/4057099469' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[88323]: Upgrade: Updating osd.4 2026-03-09T15:36:27.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:27.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.4"}]: dispatch 2026-03-09T15:36:27.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:27.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:27 vm05 ceph-mon[88323]: Deploying daemon osd.4 on vm09 2026-03-09T15:36:28.063 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:27 vm09 systemd[1]: Stopping Ceph osd.4 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:36:28.063 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:27 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4[52679]: 2026-03-09T15:36:27.801+0000 7f686a679700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:36:28.063 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:27 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4[52679]: 2026-03-09T15:36:27.801+0000 7f686a679700 -1 osd.4 107 *** Got signal Terminated *** 2026-03-09T15:36:28.063 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:27 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4[52679]: 2026-03-09T15:36:27.801+0000 7f686a679700 -1 osd.4 107 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:36:28.906 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:28 vm09 podman[79261]: 2026-03-09 15:36:28.702432249 +0000 UTC m=+0.915634049 container died 00685022776eac64094e90bad85c195a6545d65421174598669569bd341be0b0 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4, build-date=2022-05-03T08:36:31.336870, release=754, GIT_BRANCH=HEAD, architecture=x86_64, ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.openshift.tags=base centos centos-stream, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, CEPH_POINT_RELEASE=-17.2.0, vendor=Red Hat, Inc., com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, distribution-scope=public, name=centos-stream, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, version=8, io.buildah.version=1.19.8, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, GIT_REPO=https://github.com/ceph/ceph-container.git, maintainer=Guillaume Abrioux , GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.expose-services=, io.k8s.display-name=CentOS Stream 8, vcs-type=git, RELEASE=HEAD) 2026-03-09T15:36:28.906 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:28 vm09 podman[79261]: 2026-03-09 15:36:28.728556925 +0000 UTC m=+0.941758726 container remove 00685022776eac64094e90bad85c195a6545d65421174598669569bd341be0b0 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, CEPH_POINT_RELEASE=-17.2.0, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, build-date=2022-05-03T08:36:31.336870, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.openshift.expose-services=, release=754, GIT_BRANCH=HEAD, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., com.redhat.component=centos-stream-container, io.k8s.display-name=CentOS Stream 8, name=centos-stream, version=8, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_REPO=https://github.com/ceph/ceph-container.git, architecture=x86_64, ceph=True, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, maintainer=Guillaume Abrioux , vendor=Red Hat, Inc., description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, RELEASE=HEAD, distribution-scope=public, GIT_CLEAN=True, io.openshift.tags=base centos centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-type=git) 2026-03-09T15:36:28.906 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:28 vm09 bash[79261]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4 2026-03-09T15:36:28.906 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:28 vm09 podman[79330]: 2026-03-09 15:36:28.870954995 +0000 UTC m=+0.015224754 container create 281bba6af21b2aa1e3a72dd117d1caba353bc76b3964d3cddce287c174d09595 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3) 2026-03-09T15:36:28.906 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:28 vm09 ceph-mon[77297]: osd.4 marked itself down and dead 2026-03-09T15:36:28.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:28 vm05 ceph-mon[86498]: osd.4 marked itself down and dead 2026-03-09T15:36:28.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 
15:36:28 vm05 ceph-mon[88323]: osd.4 marked itself down and dead 2026-03-09T15:36:29.191 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:28 vm09 podman[79330]: 2026-03-09 15:36:28.914686131 +0000 UTC m=+0.058955911 container init 281bba6af21b2aa1e3a72dd117d1caba353bc76b3964d3cddce287c174d09595 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:36:29.191 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:28 vm09 podman[79330]: 2026-03-09 15:36:28.917607801 +0000 UTC m=+0.061877560 container start 281bba6af21b2aa1e3a72dd117d1caba353bc76b3964d3cddce287c174d09595 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.label-schema.license=GPLv2) 2026-03-09T15:36:29.191 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:28 vm09 podman[79330]: 2026-03-09 15:36:28.919340805 +0000 UTC m=+0.063610564 container attach 281bba6af21b2aa1e3a72dd117d1caba353bc76b3964d3cddce287c174d09595 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-deactivate, OSD_FLAVOR=default, org.label-schema.build-date=20260223, ceph=True, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T15:36:29.191 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:28 vm09 podman[79330]: 2026-03-09 15:36:28.865023671 +0000 UTC m=+0.009293439 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:36:29.191 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 podman[79350]: 2026-03-09 15:36:29.059189903 +0000 UTC m=+0.010081695 container died 
281bba6af21b2aa1e3a72dd117d1caba353bc76b3964d3cddce287c174d09595 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:36:29.191 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 podman[79350]: 2026-03-09 15:36:29.076768451 +0000 UTC m=+0.027660254 container remove 281bba6af21b2aa1e3a72dd117d1caba353bc76b3964d3cddce287c174d09595 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-deactivate, OSD_FLAVOR=default, org.label-schema.build-date=20260223, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T15:36:29.191 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.4.service: Deactivated successfully. 2026-03-09T15:36:29.191 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 systemd[1]: Stopped Ceph osd.4 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:36:29.191 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.4.service: Consumed 19.982s CPU time. 2026-03-09T15:36:29.563 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 systemd[1]: Starting Ceph osd.4 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:36:29.563 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 podman[79445]: 2026-03-09 15:36:29.42186835 +0000 UTC m=+0.017272035 container create 1c875ccbe43efe23e596393c745d5575563b1461442c67622764893fb3eafc26 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3) 2026-03-09T15:36:29.563 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 podman[79445]: 2026-03-09 15:36:29.484293234 +0000 UTC m=+0.079696930 container init 1c875ccbe43efe23e596393c745d5575563b1461442c67622764893fb3eafc26 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.build-date=20260223, OSD_FLAVOR=default) 2026-03-09T15:36:29.563 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 podman[79445]: 2026-03-09 15:36:29.488319001 +0000 UTC m=+0.083722686 container start 1c875ccbe43efe23e596393c745d5575563b1461442c67622764893fb3eafc26 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS) 2026-03-09T15:36:29.563 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 podman[79445]: 2026-03-09 15:36:29.48940279 +0000 UTC m=+0.084806475 container attach 1c875ccbe43efe23e596393c745d5575563b1461442c67622764893fb3eafc26 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, 
CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0) 2026-03-09T15:36:29.563 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 podman[79445]: 2026-03-09 15:36:29.414981558 +0000 UTC m=+0.010385253 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:36:29.908 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate[79456]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:29.908 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 bash[79445]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:29.908 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate[79456]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:29.908 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:29 vm09 bash[79445]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:29.909 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:29 vm09 ceph-mon[77297]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:36:29.909 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:29 vm09 ceph-mon[77297]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:36:29.909 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:29 vm09 ceph-mon[77297]: osdmap e108: 8 total, 7 up, 8 in 2026-03-09T15:36:29.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:29 vm05 ceph-mon[86498]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:36:29.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:29 vm05 ceph-mon[86498]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:36:29.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:29 vm05 ceph-mon[86498]: osdmap e108: 8 total, 7 up, 8 in 2026-03-09T15:36:29.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:29 vm05 ceph-mon[88323]: pgmap v82: 161 pgs: 161 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:36:29.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:29 vm05 ceph-mon[88323]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:36:29.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:29 vm05 ceph-mon[88323]: osdmap e108: 8 total, 7 up, 8 in 2026-03-09T15:36:30.186 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate[79456]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:36:30.186 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate[79456]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:30.186 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 bash[79445]: --> Failed to activate via raw: did not find any matching OSD 
to activate 2026-03-09T15:36:30.186 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 bash[79445]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:30.186 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate[79456]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:30.186 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 bash[79445]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:30.186 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate[79456]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T15:36:30.186 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 bash[79445]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T15:36:30.465 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate[79456]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-34067978-e0d5-426d-be9a-440b95bb80b2/osd-block-c4e71d46-f1e6-4cdb-b025-41a19b086c9f --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-09T15:36:30.465 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 bash[79445]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-34067978-e0d5-426d-be9a-440b95bb80b2/osd-block-c4e71d46-f1e6-4cdb-b025-41a19b086c9f --path /var/lib/ceph/osd/ceph-4 --no-mon-config 2026-03-09T15:36:30.812 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate[79456]: Running command: /usr/bin/ln -snf /dev/ceph-34067978-e0d5-426d-be9a-440b95bb80b2/osd-block-c4e71d46-f1e6-4cdb-b025-41a19b086c9f /var/lib/ceph/osd/ceph-4/block 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 bash[79445]: Running command: /usr/bin/ln -snf /dev/ceph-34067978-e0d5-426d-be9a-440b95bb80b2/osd-block-c4e71d46-f1e6-4cdb-b025-41a19b086c9f /var/lib/ceph/osd/ceph-4/block 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate[79456]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 bash[79445]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-4/block 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate[79456]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 bash[79445]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-0 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate[79456]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 bash[79445]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-4 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate[79456]: --> ceph-volume lvm activate successful for osd ID: 4 2026-03-09T15:36:30.813 
INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 bash[79445]: --> ceph-volume lvm activate successful for osd ID: 4 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 podman[79445]: 2026-03-09 15:36:30.490124997 +0000 UTC m=+1.085528682 container died 1c875ccbe43efe23e596393c745d5575563b1461442c67622764893fb3eafc26 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_REF=squid, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 podman[79445]: 2026-03-09 15:36:30.517873696 +0000 UTC m=+1.113277381 container remove 1c875ccbe43efe23e596393c745d5575563b1461442c67622764893fb3eafc26 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-activate, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 podman[79693]: 2026-03-09 15:36:30.620665839 +0000 UTC m=+0.016675328 container create 7111150665fe25176d5ed83b9d97267ada4b120b36ee2efc22a014a6ccd248c8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 podman[79693]: 2026-03-09 15:36:30.652973882 +0000 UTC m=+0.048983381 container init 7111150665fe25176d5ed83b9d97267ada4b120b36ee2efc22a014a6ccd248c8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4, org.label-schema.name=CentOS Stream 9 Base Image, 
org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, io.buildah.version=1.41.3) 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 podman[79693]: 2026-03-09 15:36:30.65909968 +0000 UTC m=+0.055109179 container start 7111150665fe25176d5ed83b9d97267ada4b120b36ee2efc22a014a6ccd248c8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True) 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 bash[79693]: 7111150665fe25176d5ed83b9d97267ada4b120b36ee2efc22a014a6ccd248c8 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 podman[79693]: 2026-03-09 15:36:30.614389099 +0000 UTC m=+0.010398608 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:36:30.813 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:30 vm09 systemd[1]: Started Ceph osd.4 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
2026-03-09T15:36:30.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:30 vm09 ceph-mon[77297]: osdmap e109: 8 total, 7 up, 8 in 2026-03-09T15:36:30.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:30 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:30.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:30 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:36:30.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:30 vm05 ceph-mon[86498]: osdmap e109: 8 total, 7 up, 8 in 2026-03-09T15:36:30.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:30.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:36:30.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:30 vm05 ceph-mon[88323]: osdmap e109: 8 total, 7 up, 8 in 2026-03-09T15:36:30.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:30 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:30.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:30 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:36:31.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:31 vm05 ceph-mon[86498]: pgmap v85: 161 pgs: 6 peering, 20 stale+active+clean, 135 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 127 B/s rd, 0 op/s 2026-03-09T15:36:31.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:31 vm05 ceph-mon[86498]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-09T15:36:31.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:31 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:31.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:31 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:31.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:31 vm05 ceph-mon[88323]: pgmap v85: 161 pgs: 6 peering, 20 stale+active+clean, 135 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 127 B/s rd, 0 op/s 2026-03-09T15:36:31.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:31 vm05 ceph-mon[88323]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-09T15:36:31.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:31 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:31.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:31 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:32.062 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:31 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4[79703]: 2026-03-09T15:36:31.742+0000 7f757e3f0740 -1 Falling back to public interface 2026-03-09T15:36:32.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:31 vm09 ceph-mon[77297]: pgmap v85: 161 pgs: 6 peering, 20 stale+active+clean, 135 active+clean; 457 KiB data, 182 MiB used, 160 GiB / 160 GiB avail; 127 B/s rd, 0 op/s 2026-03-09T15:36:32.063 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:31 vm09 ceph-mon[77297]: Health check failed: Reduced data availability: 1 pg peering (PG_AVAILABILITY) 2026-03-09T15:36:32.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:31 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:32.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:31 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:32.856 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:36:32 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:36:32] "GET /metrics HTTP/1.1" 200 37921 "" "Prometheus/2.51.0" 2026-03-09T15:36:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:32 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:33.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:33.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:32 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:33.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:32 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:33.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:32 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:33.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:32 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:33.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:32 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:33.312 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:32 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4[79703]: 2026-03-09T15:36:32.862+0000 7f757e3f0740 -1 osd.4 0 read_superblock omap replica is missing. 
2026-03-09T15:36:33.313 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:32 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4[79703]: 2026-03-09T15:36:32.902+0000 7f757e3f0740 -1 osd.4 107 log_to_monitors true 2026-03-09T15:36:33.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:32 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:33.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:32 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:33.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:32 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:33.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:32 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:33.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:32 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:33.864 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:36:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:36:33.543+0000 7fef4b3b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (11 PGs are or would become offline) 2026-03-09T15:36:33.864 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:33.517Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:33.864 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:33.518Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:33.864 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:33.518Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:33.864 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:33.518Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:34.149 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:36:33 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4[79703]: 
2026-03-09T15:36:33.886+0000 7f757619b640 -1 osd.4 107 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: pgmap v86: 161 pgs: 27 active+undersized, 6 peering, 3 stale+active+clean, 24 active+undersized+degraded, 101 active+clean; 457 KiB data, 183 MiB used, 160 GiB / 160 GiB avail; 90/627 objects degraded (14.354%) 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: from='osd.4 [v2:192.168.123.109:6800/1920777117,v1:192.168.123.109:6801/1920777117]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: Health check failed: Degraded data redundancy: 90/627 objects degraded (14.354%), 24 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:34.149 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:33 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[86498]: pgmap v86: 161 pgs: 27 active+undersized, 6 peering, 3 stale+active+clean, 24 active+undersized+degraded, 101 active+clean; 457 KiB data, 183 MiB used, 160 GiB / 160 GiB avail; 90/627 objects degraded (14.354%) 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 
09 15:36:33 vm05 ceph-mon[86498]: from='osd.4 [v2:192.168.123.109:6800/1920777117,v1:192.168.123.109:6801/1920777117]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[86498]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[86498]: Health check failed: Degraded data redundancy: 90/627 objects degraded (14.354%), 24 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:34.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[88323]: pgmap v86: 161 pgs: 27 active+undersized, 6 peering, 3 stale+active+clean, 24 active+undersized+degraded, 101 active+clean; 457 KiB data, 183 MiB used, 160 GiB / 160 GiB avail; 90/627 objects degraded (14.354%) 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[88323]: from='osd.4 [v2:192.168.123.109:6800/1920777117,v1:192.168.123.109:6801/1920777117]' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[88323]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]: dispatch 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 
ceph-mon[88323]: Health check failed: Degraded data redundancy: 90/627 objects degraded (14.354%), 24 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:34.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:33 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T15:36:34.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:34.147Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.4\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-09T15:36:34.562 
INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.4\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:36:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:34 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T15:36:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:34 vm05 ceph-mon[86498]: Upgrade: unsafe to stop osd(s) at this time (11 PGs are or would become offline) 2026-03-09T15:36:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:34 vm05 ceph-mon[86498]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T15:36:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:34 vm05 ceph-mon[86498]: osdmap e110: 8 total, 7 up, 8 in 2026-03-09T15:36:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:34 vm05 ceph-mon[86498]: from='osd.4 [v2:192.168.123.109:6800/1920777117,v1:192.168.123.109:6801/1920777117]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:36:35.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:34 vm05 ceph-mon[86498]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:36:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:34 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T15:36:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:34 vm05 ceph-mon[88323]: Upgrade: unsafe to stop osd(s) at this time (11 PGs are or would become offline) 2026-03-09T15:36:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:34 vm05 ceph-mon[88323]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T15:36:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:34 vm05 ceph-mon[88323]: osdmap e110: 8 total, 7 up, 8 in 2026-03-09T15:36:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:34 vm05 ceph-mon[88323]: from='osd.4 [v2:192.168.123.109:6800/1920777117,v1:192.168.123.109:6801/1920777117]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:36:35.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:34 vm05 ceph-mon[88323]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:36:35.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:34 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T15:36:35.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:34 vm09 ceph-mon[77297]: Upgrade: unsafe to stop osd(s) at this time (11 PGs are or would become offline) 2026-03-09T15:36:35.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:34 vm09 ceph-mon[77297]: from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["4"]}]': finished 2026-03-09T15:36:35.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:34 vm09 ceph-mon[77297]: osdmap e110: 8 total, 7 up, 8 in 2026-03-09T15:36:35.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:34 vm09 ceph-mon[77297]: from='osd.4 [v2:192.168.123.109:6800/1920777117,v1:192.168.123.109:6801/1920777117]' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:36:35.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:34 vm09 ceph-mon[77297]: from='osd.4 ' entity='osd.4' cmd=[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:36:36.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:35 vm05 ceph-mon[86498]: pgmap v88: 161 pgs: 39 active+undersized, 29 active+undersized+degraded, 93 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 108/627 objects degraded (17.225%) 2026-03-09T15:36:36.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:35 vm05 ceph-mon[86498]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-09T15:36:36.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:35 vm05 ceph-mon[86498]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:36:36.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:35 vm05 ceph-mon[86498]: osd.4 [v2:192.168.123.109:6800/1920777117,v1:192.168.123.109:6801/1920777117] boot 2026-03-09T15:36:36.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:35 vm05 ceph-mon[86498]: osdmap e111: 8 total, 8 up, 8 in 2026-03-09T15:36:36.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:35 vm05 ceph-mon[86498]: from='mgr.25004 
192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:36:36.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:35 vm05 ceph-mon[88323]: pgmap v88: 161 pgs: 39 active+undersized, 29 active+undersized+degraded, 93 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 108/627 objects degraded (17.225%) 2026-03-09T15:36:36.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:35 vm05 ceph-mon[88323]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-09T15:36:36.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:35 vm05 ceph-mon[88323]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:36:36.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:35 vm05 ceph-mon[88323]: osd.4 [v2:192.168.123.109:6800/1920777117,v1:192.168.123.109:6801/1920777117] boot 2026-03-09T15:36:36.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:35 vm05 ceph-mon[88323]: osdmap e111: 8 total, 8 up, 8 in 2026-03-09T15:36:36.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:35 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:36:36.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:35 vm09 ceph-mon[77297]: pgmap v88: 161 pgs: 39 active+undersized, 29 active+undersized+degraded, 93 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 108/627 objects degraded (17.225%) 2026-03-09T15:36:36.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:35 vm09 ceph-mon[77297]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 1 pg peering) 2026-03-09T15:36:36.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:35 vm09 ceph-mon[77297]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:36:36.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:35 vm09 ceph-mon[77297]: osd.4 [v2:192.168.123.109:6800/1920777117,v1:192.168.123.109:6801/1920777117] boot 2026-03-09T15:36:36.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:35 vm09 ceph-mon[77297]: osdmap e111: 8 total, 8 up, 8 in 2026-03-09T15:36:36.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:35 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 4}]: dispatch 2026-03-09T15:36:37.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:36 vm05 ceph-mon[86498]: osdmap e112: 8 total, 8 up, 8 in 2026-03-09T15:36:37.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:36 vm05 ceph-mon[88323]: osdmap e112: 8 total, 8 up, 8 in 2026-03-09T15:36:37.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the 
match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:36:37.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:36 vm09 ceph-mon[77297]: osdmap e112: 8 total, 8 up, 8 in 2026-03-09T15:36:38.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:37 vm05 ceph-mon[86498]: pgmap v91: 161 pgs: 4 peering, 22 active+undersized, 12 active+undersized+degraded, 123 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 36/627 objects degraded (5.742%) 2026-03-09T15:36:38.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:37 vm05 ceph-mon[88323]: pgmap v91: 161 pgs: 4 peering, 22 active+undersized, 12 active+undersized+degraded, 123 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 36/627 objects degraded (5.742%) 2026-03-09T15:36:38.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:37 vm09 ceph-mon[77297]: pgmap v91: 161 pgs: 4 peering, 22 active+undersized, 12 active+undersized+degraded, 123 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 36/627 objects degraded (5.742%) 2026-03-09T15:36:39.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:38 vm05 ceph-mon[86498]: Health check update: Degraded data redundancy: 29/627 objects degraded (4.625%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:39.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:38 vm05 ceph-mon[88323]: Health check update: Degraded data redundancy: 29/627 objects degraded (4.625%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:39.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:38 vm09 ceph-mon[77297]: Health check update: Degraded data redundancy: 29/627 objects degraded (4.625%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:40.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:39 vm09 ceph-mon[77297]: pgmap v92: 161 pgs: 4 peering, 15 active+undersized, 11 active+undersized+degraded, 131 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 29/627 objects degraded (4.625%) 2026-03-09T15:36:40.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:39 vm05 ceph-mon[86498]: pgmap v92: 161 pgs: 4 peering, 15 active+undersized, 11 active+undersized+degraded, 131 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 29/627 objects degraded (4.625%) 2026-03-09T15:36:40.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:39 vm05 ceph-mon[88323]: pgmap v92: 161 pgs: 4 peering, 15 active+undersized, 11 active+undersized+degraded, 131 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s; 29/627 objects degraded (4.625%) 2026-03-09T15:36:41.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:41 vm09 ceph-mon[77297]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 29/627 objects degraded (4.625%), 11 pgs degraded) 2026-03-09T15:36:41.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 
15:36:41 vm09 ceph-mon[77297]: Cluster is now healthy 2026-03-09T15:36:41.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:41 vm05 ceph-mon[86498]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 29/627 objects degraded (4.625%), 11 pgs degraded) 2026-03-09T15:36:41.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:41 vm05 ceph-mon[86498]: Cluster is now healthy 2026-03-09T15:36:41.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:41 vm05 ceph-mon[88323]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 29/627 objects degraded (4.625%), 11 pgs degraded) 2026-03-09T15:36:41.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:41 vm05 ceph-mon[88323]: Cluster is now healthy 2026-03-09T15:36:42.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:42 vm09 ceph-mon[77297]: pgmap v93: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 771 B/s rd, 0 op/s 2026-03-09T15:36:42.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:42 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:42 vm05 ceph-mon[86498]: pgmap v93: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 771 B/s rd, 0 op/s 2026-03-09T15:36:42.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:42 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:42 vm05 ceph-mon[88323]: pgmap v93: 161 pgs: 4 peering, 157 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 771 B/s rd, 0 op/s 2026-03-09T15:36:42.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:42 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:42.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:36:42 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:36:42] "GET /metrics HTTP/1.1" 200 37921 "" "Prometheus/2.51.0" 2026-03-09T15:36:43.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:43 vm09 ceph-mon[77297]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:36:43.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:43 vm05 ceph-mon[86498]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:36:43.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:43 vm05 ceph-mon[88323]: pgmap v94: 161 pgs: 161 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:36:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:43.518Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 
15:36:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:43.518Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:43.519Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:43.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:43.519Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:44.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:44 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:44.148Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.4\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-09T15:36:44.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:44 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:44.149Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 
minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.4\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.4\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:36:45.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:45 vm05 ceph-mon[86498]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T15:36:45.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:45 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:45.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:45 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:36:45.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:45 vm05 ceph-mon[88323]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T15:36:45.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:45 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:45.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:45 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:36:46.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:45 vm09 ceph-mon[77297]: pgmap v95: 161 pgs: 161 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T15:36:46.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:45 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:46.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:45 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:36:47.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:46 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: 
predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:36:47.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:47 vm05 ceph-mon[86498]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 964 B/s rd, 0 op/s 2026-03-09T15:36:47.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:47 vm05 ceph-mon[88323]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 964 B/s rd, 0 op/s 2026-03-09T15:36:48.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:47 vm09 ceph-mon[77297]: pgmap v96: 161 pgs: 161 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 964 B/s rd, 0 op/s 2026-03-09T15:36:48.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:48 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T15:36:48.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:48 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T15:36:49.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:48 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T15:36:49.707 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:49 vm09 systemd[1]: Stopping Ceph osd.5 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:36:49.988 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:49 vm09 ceph-mon[77297]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:36:49.988 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:49 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T15:36:49.988 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:49 vm09 ceph-mon[77297]: Upgrade: osd.5 is safe to restart 2026-03-09T15:36:49.988 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:49 vm09 ceph-mon[77297]: Upgrade: Updating osd.5 2026-03-09T15:36:49.988 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:49.988 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T15:36:49.988 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:49.988 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:49 vm09 ceph-mon[77297]: Deploying daemon osd.5 on vm09 2026-03-09T15:36:49.988 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:49 vm09 ceph-mon[77297]: osd.5 marked itself down and dead 2026-03-09T15:36:49.988 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:49 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[55451]: 2026-03-09T15:36:49.705+0000 7fbc02609700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.5 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:36:49.988 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:49 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[55451]: 2026-03-09T15:36:49.705+0000 7fbc02609700 -1 osd.5 112 *** Got signal Terminated *** 2026-03-09T15:36:49.988 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:49 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[55451]: 2026-03-09T15:36:49.705+0000 7fbc02609700 -1 osd.5 112 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[86498]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[86498]: Upgrade: osd.5 is safe to restart 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[86498]: Upgrade: Updating osd.5 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[86498]: Deploying daemon osd.5 on vm09 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[86498]: osd.5 marked itself down and dead 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[88323]: pgmap v97: 161 pgs: 161 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["5"], "max": 16}]: dispatch 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[88323]: Upgrade: osd.5 is safe to restart 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[88323]: Upgrade: Updating osd.5 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.5"}]: dispatch 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[88323]: Deploying daemon osd.5 on vm09 2026-03-09T15:36:50.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:49 vm05 ceph-mon[88323]: osd.5 marked itself down and dead 2026-03-09T15:36:50.258 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:49 vm09 podman[81193]: 2026-03-09 15:36:49.98663683 +0000 UTC m=+0.296829767 container died fbdec571623eab320a79a42588b8d8ba3a2c1e2ee4d1f40843908aab6fa3b8a5 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, version=8, io.openshift.expose-services=, vendor=Red Hat, Inc., GIT_BRANCH=HEAD, RELEASE=HEAD, build-date=2022-05-03T08:36:31.336870, ceph=True, maintainer=Guillaume Abrioux , description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, release=754, vcs-type=git, com.redhat.component=centos-stream-container, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=CentOS Stream 8, io.openshift.tags=base centos centos-stream, GIT_CLEAN=True, architecture=x86_64, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, distribution-scope=public, io.buildah.version=1.19.8, name=centos-stream, CEPH_POINT_RELEASE=-17.2.0, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, GIT_REPO=https://github.com/ceph/ceph-container.git) 2026-03-09T15:36:50.258 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 podman[81193]: 2026-03-09 15:36:50.004223221 +0000 UTC m=+0.314416169 container remove fbdec571623eab320a79a42588b8d8ba3a2c1e2ee4d1f40843908aab6fa3b8a5 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, release=754, distribution-scope=public, GIT_BRANCH=HEAD, com.redhat.component=centos-stream-container, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, io.buildah.version=1.19.8, build-date=2022-05-03T08:36:31.336870, version=8, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, maintainer=Guillaume Abrioux , GIT_REPO=https://github.com/ceph/ceph-container.git, name=centos-stream, RELEASE=HEAD, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.openshift.expose-services=, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, CEPH_POINT_RELEASE=-17.2.0, io.k8s.display-name=CentOS Stream 8, vendor=Red Hat, Inc., architecture=x86_64, ceph=True, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-type=git, io.openshift.tags=base centos centos-stream, GIT_CLEAN=True) 2026-03-09T15:36:50.258 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 bash[81193]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5 2026-03-09T15:36:50.258 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 podman[81260]: 2026-03-09 15:36:50.164926248 +0000 UTC m=+0.018209170 container create 7b7c7c760b41d9cab234bb1006c72c8b57ac6a0f06b0e83b6459d42b1003012b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-deactivate, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:36:50.258 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 podman[81260]: 2026-03-09 15:36:50.208337056 +0000 UTC m=+0.061619978 container init 7b7c7c760b41d9cab234bb1006c72c8b57ac6a0f06b0e83b6459d42b1003012b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-deactivate, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:36:50.258 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 podman[81260]: 2026-03-09 15:36:50.21230294 +0000 UTC m=+0.065585853 container start 7b7c7c760b41d9cab234bb1006c72c8b57ac6a0f06b0e83b6459d42b1003012b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True) 2026-03-09T15:36:50.258 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 podman[81260]: 2026-03-09 15:36:50.215415137 +0000 UTC m=+0.068698068 
container attach 7b7c7c760b41d9cab234bb1006c72c8b57ac6a0f06b0e83b6459d42b1003012b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid) 2026-03-09T15:36:50.258 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 podman[81260]: 2026-03-09 15:36:50.157846815 +0000 UTC m=+0.011129737 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:36:50.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 podman[81279]: 2026-03-09 15:36:50.378541309 +0000 UTC m=+0.010679333 container died 7b7c7c760b41d9cab234bb1006c72c8b57ac6a0f06b0e83b6459d42b1003012b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T15:36:50.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 podman[81279]: 2026-03-09 15:36:50.396543361 +0000 UTC m=+0.028681394 container remove 7b7c7c760b41d9cab234bb1006c72c8b57ac6a0f06b0e83b6459d42b1003012b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS) 2026-03-09T15:36:50.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.5.service: Deactivated successfully. 2026-03-09T15:36:50.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 systemd[1]: Stopped Ceph osd.5 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
2026-03-09T15:36:50.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.5.service: Consumed 27.730s CPU time. 2026-03-09T15:36:50.925 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:50 vm09 ceph-mon[77297]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:36:50.925 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:50 vm09 ceph-mon[77297]: osdmap e113: 8 total, 7 up, 8 in 2026-03-09T15:36:50.925 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:50 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:50.925 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 systemd[1]: Starting Ceph osd.5 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:36:50.925 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 podman[81364]: 2026-03-09 15:36:50.698617684 +0000 UTC m=+0.017083702 container create 6ae0650c666b27f017b275f90698f490f6214867e18b7078606195f82b424ea2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid) 2026-03-09T15:36:50.925 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 podman[81364]: 2026-03-09 15:36:50.743582812 +0000 UTC m=+0.062048840 container init 6ae0650c666b27f017b275f90698f490f6214867e18b7078606195f82b424ea2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.build-date=20260223) 2026-03-09T15:36:50.925 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 podman[81364]: 2026-03-09 15:36:50.747265927 +0000 UTC m=+0.065731935 container start 6ae0650c666b27f017b275f90698f490f6214867e18b7078606195f82b424ea2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, ceph=True, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, 
org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:36:50.925 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 podman[81364]: 2026-03-09 15:36:50.748460083 +0000 UTC m=+0.066926101 container attach 6ae0650c666b27f017b275f90698f490f6214867e18b7078606195f82b424ea2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, ceph=True) 2026-03-09T15:36:50.925 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 podman[81364]: 2026-03-09 15:36:50.692087299 +0000 UTC m=+0.010553327 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:36:50.925 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate[81375]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:50.925 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 bash[81364]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:50.925 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate[81375]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:50.925 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:50 vm09 bash[81364]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:51.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:50 vm05 ceph-mon[86498]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:36:51.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:50 vm05 ceph-mon[86498]: osdmap e113: 8 total, 7 up, 8 in 2026-03-09T15:36:51.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:50 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:51.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:50 vm05 ceph-mon[88323]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:36:51.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:50 vm05 ceph-mon[88323]: osdmap e113: 8 total, 7 up, 8 in 2026-03-09T15:36:51.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:50 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:51.288 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate[81375]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:36:51.288 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 bash[81364]: --> Failed to activate via raw: did not find any matching OSD to activate 
2026-03-09T15:36:51.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate[81375]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:51.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 bash[81364]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:51.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate[81375]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:51.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 bash[81364]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:36:51.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate[81375]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-09T15:36:51.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 bash[81364]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-09T15:36:51.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate[81375]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-d2c8dbe6-b39b-429b-8aeb-3f97e46e24e1/osd-block-b092f491-9b09-4c77-81aa-03a6adc5b415 --path /var/lib/ceph/osd/ceph-5 --no-mon-config 2026-03-09T15:36:51.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 bash[81364]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-d2c8dbe6-b39b-429b-8aeb-3f97e46e24e1/osd-block-b092f491-9b09-4c77-81aa-03a6adc5b415 --path /var/lib/ceph/osd/ceph-5 --no-mon-config 2026-03-09T15:36:51.886 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate[81375]: Running command: /usr/bin/ln -snf /dev/ceph-d2c8dbe6-b39b-429b-8aeb-3f97e46e24e1/osd-block-b092f491-9b09-4c77-81aa-03a6adc5b415 /var/lib/ceph/osd/ceph-5/block 2026-03-09T15:36:51.886 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 bash[81364]: Running command: /usr/bin/ln -snf /dev/ceph-d2c8dbe6-b39b-429b-8aeb-3f97e46e24e1/osd-block-b092f491-9b09-4c77-81aa-03a6adc5b415 /var/lib/ceph/osd/ceph-5/block 2026-03-09T15:36:51.886 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate[81375]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block 2026-03-09T15:36:51.886 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 bash[81364]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-5/block 2026-03-09T15:36:51.886 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate[81375]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T15:36:51.886 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 bash[81364]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-1 2026-03-09T15:36:51.886 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate[81375]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-09T15:36:51.886 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 bash[81364]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-5 2026-03-09T15:36:51.886 
INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate[81375]: --> ceph-volume lvm activate successful for osd ID: 5 2026-03-09T15:36:51.886 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 bash[81364]: --> ceph-volume lvm activate successful for osd ID: 5 2026-03-09T15:36:51.886 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 conmon[81375]: conmon 6ae0650c666b27f017b2 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-6ae0650c666b27f017b275f90698f490f6214867e18b7078606195f82b424ea2.scope/container/memory.events 2026-03-09T15:36:51.886 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 podman[81364]: 2026-03-09 15:36:51.670049028 +0000 UTC m=+0.988515046 container died 6ae0650c666b27f017b275f90698f490f6214867e18b7078606195f82b424ea2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:36:51.886 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:51 vm09 podman[81364]: 2026-03-09 15:36:51.883840069 +0000 UTC m=+1.202306087 container remove 6ae0650c666b27f017b275f90698f490f6214867e18b7078606195f82b424ea2 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-activate, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, ceph=True, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2) 2026-03-09T15:36:52.165 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:51 vm09 ceph-mon[77297]: pgmap v99: 161 pgs: 22 stale+active+clean, 139 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:36:52.165 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:51 vm09 ceph-mon[77297]: osdmap e114: 8 total, 7 up, 8 in 2026-03-09T15:36:52.165 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:52 vm09 podman[81613]: 2026-03-09 15:36:52.00764513 +0000 UTC m=+0.020857106 container create 1512c99eec2be0e1e78406a32bd794eeacb47252968133904ba36d1efaea513e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.license=GPLv2, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, 
org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.build-date=20260223) 2026-03-09T15:36:52.165 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:52 vm09 podman[81613]: 2026-03-09 15:36:51.999765219 +0000 UTC m=+0.012977185 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:36:52.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:51 vm05 ceph-mon[86498]: pgmap v99: 161 pgs: 22 stale+active+clean, 139 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:36:52.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:51 vm05 ceph-mon[86498]: osdmap e114: 8 total, 7 up, 8 in 2026-03-09T15:36:52.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:51 vm05 ceph-mon[88323]: pgmap v99: 161 pgs: 22 stale+active+clean, 139 active+clean; 457 KiB data, 201 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:36:52.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:51 vm05 ceph-mon[88323]: osdmap e114: 8 total, 7 up, 8 in 2026-03-09T15:36:52.501 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:52 vm09 podman[81613]: 2026-03-09 15:36:52.163893979 +0000 UTC m=+0.177105955 container init 1512c99eec2be0e1e78406a32bd794eeacb47252968133904ba36d1efaea513e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223) 2026-03-09T15:36:52.501 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:52 vm09 podman[81613]: 2026-03-09 15:36:52.168008872 +0000 UTC m=+0.181220848 container start 1512c99eec2be0e1e78406a32bd794eeacb47252968133904ba36d1efaea513e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.vendor=CentOS) 2026-03-09T15:36:52.501 
INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:52 vm09 bash[81613]: 1512c99eec2be0e1e78406a32bd794eeacb47252968133904ba36d1efaea513e 2026-03-09T15:36:52.501 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:52 vm09 systemd[1]: Started Ceph osd.5 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:36:52.502 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:52 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:36:52.495+0000 7fdf0191c740 -1 Falling back to public interface 2026-03-09T15:36:52.959 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:36:52 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:36:52] "GET /metrics HTTP/1.1" 200 38002 "" "Prometheus/2.51.0" 2026-03-09T15:36:53.188 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:52 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:53.189 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:53.189 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:53.189 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:53.189 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:52 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:52 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:53.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:52 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:53.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:52 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:36:53.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:53.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:53.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:53.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:52 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:53.562 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:53 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:36:53.349+0000 
7fdf0191c740 -1 osd.5 0 read_superblock omap replica is missing. 2026-03-09T15:36:53.563 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:53 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:36:53.362+0000 7fdf0191c740 -1 osd.5 112 log_to_monitors true 2026-03-09T15:36:53.961 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:53.519Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:53.962 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:53.519Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:53.962 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:53.520Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:53.962 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:36:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:36:53.520Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:36:54.224 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:53 vm09 ceph-mon[77297]: pgmap v101: 161 pgs: 13 active+undersized, 10 peering, 10 stale+active+clean, 9 active+undersized+degraded, 119 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 26/627 objects degraded (4.147%) 2026-03-09T15:36:54.224 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:53 vm09 ceph-mon[77297]: from='osd.5 [v2:192.168.123.109:6808/561202878,v1:192.168.123.109:6809/561202878]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T15:36:54.224 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:53 vm09 ceph-mon[77297]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T15:36:54.224 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:53 vm09 ceph-mon[77297]: Health check failed: Reduced data availability: 3 pgs peering (PG_AVAILABILITY) 2026-03-09T15:36:54.224 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:53 vm09 ceph-mon[77297]: Health check failed: Degraded data redundancy: 26/627 objects degraded (4.147%), 9 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:54.224 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:53 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:54.224 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:53 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:54.224 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:53 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:54.224 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:53 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:54.224 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:54 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:54.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.5\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.5\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.5\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[86498]: pgmap v101: 161 pgs: 13 active+undersized, 10 peering, 10 stale+active+clean, 9 active+undersized+degraded, 119 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 26/627 objects degraded (4.147%) 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[86498]: from='osd.5 [v2:192.168.123.109:6808/561202878,v1:192.168.123.109:6809/561202878]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[86498]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 
15:36:53 vm05 ceph-mon[86498]: Health check failed: Reduced data availability: 3 pgs peering (PG_AVAILABILITY) 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[86498]: Health check failed: Degraded data redundancy: 26/627 objects degraded (4.147%), 9 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[88323]: pgmap v101: 161 pgs: 13 active+undersized, 10 peering, 10 stale+active+clean, 9 active+undersized+degraded, 119 active+clean; 457 KiB data, 202 MiB used, 160 GiB / 160 GiB avail; 26/627 objects degraded (4.147%) 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[88323]: from='osd.5 [v2:192.168.123.109:6808/561202878,v1:192.168.123.109:6809/561202878]' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[88323]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]: dispatch 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[88323]: Health check failed: Reduced data availability: 3 pgs peering (PG_AVAILABILITY) 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[88323]: Health check failed: Degraded data redundancy: 26/627 objects degraded (4.147%), 9 pgs degraded (PG_DEGRADED) 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:54.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:54.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:54.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:53 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:55.076 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:54 vm09 ceph-mon[77297]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T15:36:55.076 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:54 vm09 ceph-mon[77297]: from='osd.5 [v2:192.168.123.109:6808/561202878,v1:192.168.123.109:6809/561202878]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:36:55.076 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:54 vm09 ceph-mon[77297]: osdmap e115: 8 total, 7 up, 8 in 2026-03-09T15:36:55.076 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:54 vm09 ceph-mon[77297]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:36:55.076 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:36:54 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:36:54.816+0000 7fdef8ec6640 -1 osd.5 112 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:36:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:54 vm05 ceph-mon[86498]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T15:36:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:54 vm05 ceph-mon[86498]: from='osd.5 [v2:192.168.123.109:6808/561202878,v1:192.168.123.109:6809/561202878]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:36:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:54 vm05 ceph-mon[86498]: osdmap e115: 8 total, 7 up, 8 in 2026-03-09T15:36:55.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:54 vm05 ceph-mon[86498]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:36:55.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:54 vm05 ceph-mon[88323]: from='osd.5 ' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["5"]}]': finished 2026-03-09T15:36:55.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:54 vm05 ceph-mon[88323]: from='osd.5 [v2:192.168.123.109:6808/561202878,v1:192.168.123.109:6809/561202878]' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:36:55.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:54 vm05 ceph-mon[88323]: osdmap e115: 8 total, 7 up, 8 in 2026-03-09T15:36:55.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:54 vm05 ceph-mon[88323]: from='osd.5 ' entity='osd.5' cmd=[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:36:55.736 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:36:55 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:36:55.285+0000 7fef4b3b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: pgmap v103: 161 pgs: 35 active+undersized, 10 peering, 15 active+undersized+degraded, 101 active+clean; 457 KiB data, 220 MiB used, 160 GiB / 160 GiB avail; 52/627 objects degraded (8.293%) 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: osd.5 [v2:192.168.123.109:6808/561202878,v1:192.168.123.109:6809/561202878] boot 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: osdmap e116: 8 total, 8 up, 8 in 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: from='mgr.25004 
192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:56.368 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: pgmap v103: 161 pgs: 35 active+undersized, 10 peering, 15 active+undersized+degraded, 101 active+clean; 457 KiB data, 220 MiB used, 160 GiB / 160 GiB avail; 52/627 objects degraded (8.293%) 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: osd.5 [v2:192.168.123.109:6808/561202878,v1:192.168.123.109:6809/561202878] boot 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: osdmap e116: 8 total, 8 up, 8 in 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 
cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:56.369 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:56 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T15:36:56.392 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: pgmap v103: 161 pgs: 35 active+undersized, 10 peering, 15 active+undersized+degraded, 101 active+clean; 457 KiB data, 220 MiB used, 160 GiB / 160 GiB avail; 52/627 objects degraded (8.293%) 2026-03-09T15:36:56.392 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:36:56.392 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: osd.5 [v2:192.168.123.109:6808/561202878,v1:192.168.123.109:6809/561202878] boot 2026-03-09T15:36:56.392 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: osdmap e116: 8 total, 8 up, 8 in 2026-03-09T15:36:56.392 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 5}]: dispatch 2026-03-09T15:36:56.392 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:56.392 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:56.392 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:36:56.392 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:36:56.392 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:36:56.392 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: from='mgr.25004 
192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:36:56.393 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:56.393 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:56.393 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:56.393 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:56 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T15:36:57.165 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:36:57.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:36:56 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:36:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:36:57.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:57 vm09 ceph-mon[77297]: OSD bench result of 28751.182884 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.5. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T15:36:57.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:57 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T15:36:57.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:57 vm09 ceph-mon[77297]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T15:36:57.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:57 vm09 ceph-mon[77297]: osdmap e117: 8 total, 8 up, 8 in 2026-03-09T15:36:57.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:57 vm09 ceph-mon[77297]: pgmap v106: 161 pgs: 27 active+undersized, 15 peering, 8 active+undersized+degraded, 111 active+clean; 457 KiB data, 220 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-09T15:36:57.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:57 vm05 ceph-mon[86498]: OSD bench result of 28751.182884 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.5. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T15:36:57.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:57 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T15:36:57.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:57 vm05 ceph-mon[86498]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T15:36:57.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:57 vm05 ceph-mon[86498]: osdmap e117: 8 total, 8 up, 8 in 2026-03-09T15:36:57.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:57 vm05 ceph-mon[86498]: pgmap v106: 161 pgs: 27 active+undersized, 15 peering, 8 active+undersized+degraded, 111 active+clean; 457 KiB data, 220 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-09T15:36:57.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:57 vm05 ceph-mon[88323]: OSD bench result of 28751.182884 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.5. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T15:36:57.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:57 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T15:36:57.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:57 vm05 ceph-mon[88323]: Upgrade: unsafe to stop osd(s) at this time (20 PGs are or would become offline) 2026-03-09T15:36:57.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:57 vm05 ceph-mon[88323]: osdmap e117: 8 total, 8 up, 8 in 2026-03-09T15:36:57.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:57 vm05 ceph-mon[88323]: pgmap v106: 161 pgs: 27 active+undersized, 15 peering, 8 active+undersized+degraded, 111 active+clean; 457 KiB data, 220 MiB used, 160 GiB / 160 GiB avail; 31/627 objects degraded (4.944%) 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (3m) 47s ago 8m 24.9M - 0.25.0 c8568f914cd2 93224b6bb99a 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (2m) 4s ago 7m 45.6M - dad864ee21e9 6a58314a043e 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 running (2m) 47s ago 7m 50.4M - 3.5 e1d6a67b021e 3a5f40e66729 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443,9283,8765 running (2m) 4s ago 9m 489M - 19.2.3-678-ge911bdeb 654f31e6858e dd2d7e10f3aa 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:8443,9283,8765 running (2m) 47s ago 10m 554M - 19.2.3-678-ge911bdeb 654f31e6858e db0211ba824d 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (2m) 47s ago 10m 50.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3fa7c78f8952 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (2m) 4s ago 9m 41.0M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 60013cd0d65b 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (2m) 47s ago 9m 40.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c4256ae4b3f9 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (3m) 47s ago 8m 9763k - 1.7.0 72c9c2088986 e730a028339f 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (2m) 4s ago 8m 9697k - 1.7.0 72c9c2088986 a360ac0679f4 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (118s) 47s ago 9m 70.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 343a65bb3f01 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (95s) 47s ago 9m 71.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 379185d73d4e 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (71s) 47s ago 9m 66.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 101cc91253b5 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (49s) 47s ago 9m 13.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 7796c013c7ba 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (26s) 4s ago 8m 46.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 7111150665fe 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (5s) 4s ago 8m 15.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 1512c99eec2b 2026-03-09T15:36:57.573 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (8m) 4s ago 8m 55.0M 4096M 17.2.0 e1d6a67b021e ad01856a3458 
2026-03-09T15:36:57.574 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (8m) 4s ago 8m 57.1M 4096M 17.2.0 e1d6a67b021e 3cc4727e5f07 2026-03-09T15:36:57.574 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (2m) 4s ago 8m 44.2M - 2.51.0 1d3b7f56885b 737f11649a72 2026-03-09T15:36:57.574 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (7m) 47s ago 7m 93.0M - 17.2.0 e1d6a67b021e 1d93c894d675 2026-03-09T15:36:57.574 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (7m) 4s ago 7m 92.3M - 17.2.0 e1d6a67b021e 5c4755297ad0 2026-03-09T15:36:57.574 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (7m) 47s ago 7m 94.3M - 17.2.0 e1d6a67b021e fe5812c6c74c 2026-03-09T15:36:57.574 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (7m) 4s ago 7m 91.4M - 17.2.0 e1d6a67b021e 7a967179b651 2026-03-09T15:36:57.820 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:36:57.820 INFO:teuthology.orchestra.run.vm05.stdout: "mon": { 2026-03-09T15:36:57.820 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T15:36:57.820 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:36:57.820 INFO:teuthology.orchestra.run.vm05.stdout: "mgr": { 2026-03-09T15:36:57.820 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T15:36:57.820 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:36:57.820 INFO:teuthology.orchestra.run.vm05.stdout: "osd": { 2026-03-09T15:36:57.820 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 2, 2026-03-09T15:36:57.820 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 6 2026-03-09T15:36:57.820 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:36:57.821 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": { 2026-03-09T15:36:57.821 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-09T15:36:57.821 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:36:57.821 INFO:teuthology.orchestra.run.vm05.stdout: "overall": { 2026-03-09T15:36:57.821 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 6, 2026-03-09T15:36:57.821 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 11 2026-03-09T15:36:57.821 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:36:57.821 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:36:58.018 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:36:58.018 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T15:36:58.018 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true, 2026-03-09T15:36:58.018 INFO:teuthology.orchestra.run.vm05.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-09T15:36:58.018 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [ 2026-03-09T15:36:58.018 INFO:teuthology.orchestra.run.vm05.stdout: "mgr", 2026-03-09T15:36:58.018 
INFO:teuthology.orchestra.run.vm05.stdout: "mon" 2026-03-09T15:36:58.018 INFO:teuthology.orchestra.run.vm05.stdout: ], 2026-03-09T15:36:58.018 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "11/23 daemons upgraded", 2026-03-09T15:36:58.018 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Currently upgrading osd daemons", 2026-03-09T15:36:58.018 INFO:teuthology.orchestra.run.vm05.stdout: "is_paused": false 2026-03-09T15:36:58.018 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_WARN Reduced data availability: 3 pgs peering; Degraded data redundancy: 31/627 objects degraded (4.944%), 8 pgs degraded 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout:[WRN] PG_AVAILABILITY: Reduced data availability: 3 pgs peering 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout: pg 2.12 is stuck peering for 7m, current state peering, last acting [3,7] 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.5 is stuck peering for 7m, current state peering, last acting [3,2] 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.c is stuck peering for 7m, current state peering, last acting [3,6] 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout:[WRN] PG_DEGRADED: Degraded data redundancy: 31/627 objects degraded (4.944%), 8 pgs degraded 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout: pg 2.2 is active+undersized+degraded, acting [1,6] 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout: pg 2.c is active+undersized+degraded, acting [2,0] 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.4 is active+undersized+degraded, acting [1,2] 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.16 is active+undersized+degraded, acting [7,1] 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.1c is active+undersized+degraded, acting [4,1] 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout: pg 3.1d is active+undersized+degraded, acting [4,6] 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout: pg 4.15 is active+undersized+degraded, acting [7,3] 2026-03-09T15:36:58.266 INFO:teuthology.orchestra.run.vm05.stdout: pg 6.1a is active+undersized+degraded, acting [4,1] 2026-03-09T15:36:58.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:58 vm05 ceph-mon[86498]: from='client.44224 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:58.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:58 vm05 ceph-mon[86498]: from='client.44230 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:58.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:58 vm05 ceph-mon[86498]: from='client.44233 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:58.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:58 vm05 ceph-mon[86498]: from='client.? 
192.168.123.105:0/1948655287' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:58.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:58 vm05 ceph-mon[86498]: from='client.34290 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:58.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:58 vm05 ceph-mon[88323]: from='client.44224 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:58.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:58 vm05 ceph-mon[88323]: from='client.44230 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:58.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:58 vm05 ceph-mon[88323]: from='client.44233 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:58.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:58 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/1948655287' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:58.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:58 vm05 ceph-mon[88323]: from='client.34290 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:58.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:58 vm09 ceph-mon[77297]: from='client.44224 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:58.564 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:58 vm09 ceph-mon[77297]: from='client.44230 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:58.564 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:58 vm09 ceph-mon[77297]: from='client.44233 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:58.564 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:58 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/1948655287' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:36:58.564 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:58 vm09 ceph-mon[77297]: from='client.34290 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:36:59.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:59 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/252985871' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:36:59.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:36:59 vm05 ceph-mon[86498]: pgmap v107: 161 pgs: 20 active+undersized, 15 peering, 7 active+undersized+degraded, 119 active+clean; 457 KiB data, 220 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s; 21/627 objects degraded (3.349%) 2026-03-09T15:36:59.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:59 vm05 ceph-mon[88323]: from='client.? 
192.168.123.105:0/252985871' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:36:59.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:36:59 vm05 ceph-mon[88323]: pgmap v107: 161 pgs: 20 active+undersized, 15 peering, 7 active+undersized+degraded, 119 active+clean; 457 KiB data, 220 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s; 21/627 objects degraded (3.349%) 2026-03-09T15:36:59.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:59 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/252985871' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:36:59.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:36:59 vm09 ceph-mon[77297]: pgmap v107: 161 pgs: 20 active+undersized, 15 peering, 7 active+undersized+degraded, 119 active+clean; 457 KiB data, 220 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 0 op/s; 21/627 objects degraded (3.349%) 2026-03-09T15:37:00.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:00 vm05 ceph-mon[86498]: Health check update: Degraded data redundancy: 21/627 objects degraded (3.349%), 7 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:00.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:00 vm05 ceph-mon[88323]: Health check update: Degraded data redundancy: 21/627 objects degraded (3.349%), 7 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:00.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:00 vm09 ceph-mon[77297]: Health check update: Degraded data redundancy: 21/627 objects degraded (3.349%), 7 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:01.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:01 vm09 ceph-mon[77297]: pgmap v108: 161 pgs: 10 peering, 151 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 627 B/s rd, 0 op/s 2026-03-09T15:37:01.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:01 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:01.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:01 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:37:01.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:01 vm09 ceph-mon[77297]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 3 pgs peering) 2026-03-09T15:37:01.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:01 vm09 ceph-mon[77297]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 21/627 objects degraded (3.349%), 7 pgs degraded) 2026-03-09T15:37:01.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:01 vm09 ceph-mon[77297]: Cluster is now healthy 2026-03-09T15:37:01.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:01 vm05 ceph-mon[86498]: pgmap v108: 161 pgs: 10 peering, 151 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 627 B/s rd, 0 op/s 2026-03-09T15:37:01.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:01 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:01.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:01 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:37:01.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:01 vm05 ceph-mon[86498]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 3 pgs peering) 2026-03-09T15:37:01.986 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:01 vm05 ceph-mon[86498]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 21/627 objects degraded (3.349%), 7 pgs degraded) 2026-03-09T15:37:01.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:01 vm05 ceph-mon[86498]: Cluster is now healthy 2026-03-09T15:37:01.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:01 vm05 ceph-mon[88323]: pgmap v108: 161 pgs: 10 peering, 151 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 627 B/s rd, 0 op/s 2026-03-09T15:37:01.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:01 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:01.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:01 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:37:01.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:01 vm05 ceph-mon[88323]: Health check cleared: PG_AVAILABILITY (was: Reduced data availability: 3 pgs peering) 2026-03-09T15:37:01.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:01 vm05 ceph-mon[88323]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 21/627 objects degraded (3.349%), 7 pgs degraded) 2026-03-09T15:37:01.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:01 vm05 ceph-mon[88323]: Cluster is now healthy 2026-03-09T15:37:02.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:02 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:02.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:37:02 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:37:02] "GET /metrics HTTP/1.1" 200 38088 "" "Prometheus/2.51.0" 2026-03-09T15:37:02.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:02 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:02.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:02 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:03.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:03 vm09 ceph-mon[77297]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T15:37:03.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:03 vm05 ceph-mon[86498]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T15:37:03.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:03 vm05 ceph-mon[88323]: pgmap v109: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 1 op/s 2026-03-09T15:37:03.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:03.520Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:03.986 
INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:03.520Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:03.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:03.520Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:03.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:03.520Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:04.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:37:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:37:04.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.6\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.6\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.6\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:37:05.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:05 vm09 ceph-mon[77297]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s 2026-03-09T15:37:05.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:05 vm05 ceph-mon[86498]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s 2026-03-09T15:37:05.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:05 vm05 ceph-mon[88323]: pgmap v110: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 968 B/s rd, 0 op/s 2026-03-09T15:37:07.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:37:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:37:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:37:07.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:07 vm05 ceph-mon[86498]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB 
avail; 981 B/s rd, 0 op/s 2026-03-09T15:37:07.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:07 vm05 ceph-mon[88323]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 981 B/s rd, 0 op/s 2026-03-09T15:37:08.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:07 vm09 ceph-mon[77297]: pgmap v111: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 981 B/s rd, 0 op/s 2026-03-09T15:37:09.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:09 vm05 ceph-mon[86498]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:37:09.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:09 vm05 ceph-mon[88323]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:37:10.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:09 vm09 ceph-mon[77297]: pgmap v112: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:37:10.793 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:10 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T15:37:10.793 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:10 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:10.793 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:10 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T15:37:10.793 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:10 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:10.793 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:10 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:10.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:10 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T15:37:10.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:10 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:10.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:10 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T15:37:10.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:10 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:10.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:10 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:10.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:10 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T15:37:10.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:10 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:10.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:10 
vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.6"}]: dispatch 2026-03-09T15:37:10.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:10 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:10.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:10 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:11.312 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:11 vm09 systemd[1]: Stopping Ceph osd.6 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:37:11.563 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:11 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[58192]: 2026-03-09T15:37:11.385+0000 7fb756046700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.6 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:37:11.563 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:11 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[58192]: 2026-03-09T15:37:11.385+0000 7fb756046700 -1 osd.6 117 *** Got signal Terminated *** 2026-03-09T15:37:11.563 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:11 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[58192]: 2026-03-09T15:37:11.385+0000 7fb756046700 -1 osd.6 117 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:37:11.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:11 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T15:37:11.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:11 vm05 ceph-mon[86498]: Upgrade: osd.6 is safe to restart 2026-03-09T15:37:11.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:11 vm05 ceph-mon[86498]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:37:11.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:11 vm05 ceph-mon[86498]: Upgrade: Updating osd.6 2026-03-09T15:37:11.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:11 vm05 ceph-mon[86498]: Deploying daemon osd.6 on vm09 2026-03-09T15:37:11.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:11 vm05 ceph-mon[86498]: osd.6 marked itself down and dead 2026-03-09T15:37:11.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:11 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T15:37:11.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:11 vm05 ceph-mon[88323]: Upgrade: osd.6 is safe to restart 2026-03-09T15:37:11.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:11 vm05 ceph-mon[88323]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:37:11.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:11 vm05 ceph-mon[88323]: Upgrade: Updating osd.6 2026-03-09T15:37:11.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:11 vm05 ceph-mon[88323]: Deploying daemon osd.6 on vm09 2026-03-09T15:37:11.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:11 vm05 ceph-mon[88323]: osd.6 marked itself down and dead 2026-03-09T15:37:12.003 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:11 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["6"], "max": 16}]: dispatch 2026-03-09T15:37:12.003 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:11 vm09 ceph-mon[77297]: Upgrade: osd.6 is safe to restart 2026-03-09T15:37:12.003 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:11 vm09 ceph-mon[77297]: pgmap v113: 161 pgs: 161 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:37:12.003 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:11 vm09 ceph-mon[77297]: Upgrade: Updating osd.6 2026-03-09T15:37:12.003 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:11 vm09 ceph-mon[77297]: Deploying daemon osd.6 on vm09 2026-03-09T15:37:12.003 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:11 vm09 ceph-mon[77297]: osd.6 marked itself down and dead 2026-03-09T15:37:12.006 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:11 vm09 podman[83118]: 2026-03-09 15:37:11.762536628 +0000 UTC m=+0.392431078 container died ad01856a34588cdb2291f64c3805171ecc1cf788b1b85e08700e654aae00fa1e (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., io.k8s.display-name=CentOS Stream 8, GIT_BRANCH=HEAD, GIT_REPO=https://github.com/ceph/ceph-container.git, build-date=2022-05-03T08:36:31.336870, maintainer=Guillaume Abrioux , com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.tags=base centos centos-stream, vendor=Red Hat, Inc., version=8, com.redhat.component=centos-stream-container, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, CEPH_POINT_RELEASE=-17.2.0, distribution-scope=public, ceph=True, architecture=x86_64, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.openshift.expose-services=, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, RELEASE=HEAD, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. 
This image takes the Red Hat UBI and layers on content from CentOS Stream, release=754, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-type=git, name=centos-stream, GIT_CLEAN=True) 2026-03-09T15:37:12.006 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:11 vm09 podman[83118]: 2026-03-09 15:37:11.800309676 +0000 UTC m=+0.430204116 container remove ad01856a34588cdb2291f64c3805171ecc1cf788b1b85e08700e654aae00fa1e (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6, vcs-type=git, version=8, GIT_BRANCH=HEAD, distribution-scope=public, io.k8s.display-name=CentOS Stream 8, GIT_CLEAN=True, RELEASE=HEAD, architecture=x86_64, build-date=2022-05-03T08:36:31.336870, ceph=True, CEPH_POINT_RELEASE=-17.2.0, name=centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.buildah.version=1.19.8, com.redhat.component=centos-stream-container, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.openshift.expose-services=, maintainer=Guillaume Abrioux , vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, io.openshift.tags=base centos centos-stream, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac) 2026-03-09T15:37:12.006 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:11 vm09 bash[83118]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6 2026-03-09T15:37:12.006 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:11 vm09 podman[83186]: 2026-03-09 15:37:11.963852243 +0000 UTC m=+0.016674137 container create c51b87684424fe40b562ac744fedcb856bf2a3af3f0f6df40a4233932a427bb1 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default) 2026-03-09T15:37:12.293 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 podman[83186]: 2026-03-09 15:37:12.012438628 +0000 UTC m=+0.065260532 container init c51b87684424fe40b562ac744fedcb856bf2a3af3f0f6df40a4233932a427bb1 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-deactivate, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, io.buildah.version=1.41.3, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2) 2026-03-09T15:37:12.293 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 podman[83186]: 2026-03-09 15:37:12.015648278 +0000 UTC m=+0.068470172 container start c51b87684424fe40b562ac744fedcb856bf2a3af3f0f6df40a4233932a427bb1 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, ceph=True, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:37:12.293 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 podman[83186]: 2026-03-09 15:37:12.016727249 +0000 UTC m=+0.069549143 container attach c51b87684424fe40b562ac744fedcb856bf2a3af3f0f6df40a4233932a427bb1 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2) 2026-03-09T15:37:12.293 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 podman[83186]: 2026-03-09 15:37:11.957915833 +0000 UTC m=+0.010737738 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:37:12.293 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 podman[83186]: 2026-03-09 15:37:12.150657686 +0000 UTC m=+0.203479569 container died c51b87684424fe40b562ac744fedcb856bf2a3af3f0f6df40a4233932a427bb1 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-deactivate, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0) 2026-03-09T15:37:12.293 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 podman[83186]: 2026-03-09 15:37:12.175262545 +0000 UTC m=+0.228084439 container remove c51b87684424fe40b562ac744fedcb856bf2a3af3f0f6df40a4233932a427bb1 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default) 2026-03-09T15:37:12.293 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.6.service: Deactivated successfully. 2026-03-09T15:37:12.293 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 systemd[1]: Stopped Ceph osd.6 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
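[editor note] The journal entries above trace cephadm's per-daemon OSD step during the upgrade: mgr.y dispatches "osd ok-to-stop" for osd.6, the mon answers "Upgrade: osd.6 is safe to restart", the running 17.2.0 container is stopped and removed, and a short-lived "-deactivate" container on the target image runs before the daemon is redeployed. The lines below are a minimal hand-run sketch of that same safety gate only; the OSD id and sleep interval are illustrative and this is not cephadm's internal logic.

    # Sketch: wait until a given OSD can be stopped without losing availability,
    # mirroring the "osd ok-to-stop" dispatch seen in the log above.
    osd_id=6            # illustrative; any OSD id works
    until ceph osd ok-to-stop "$osd_id"; do
        echo "osd.$osd_id not yet safe to stop; waiting for PGs to settle"
        sleep 10        # assumed polling interval
    done
    echo "osd.$osd_id is safe to restart"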
2026-03-09T15:37:12.293 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.6.service: Consumed 3.808s CPU time. 2026-03-09T15:37:12.665 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 systemd[1]: Starting Ceph osd.6 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:37:12.665 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 podman[83297]: 2026-03-09 15:37:12.524117936 +0000 UTC m=+0.019548325 container create 23cd998f4a13da76dfd4ef8c2ebb0d756a985ce09d8d06dde51a863723513984 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.build-date=20260223) 2026-03-09T15:37:12.665 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 podman[83297]: 2026-03-09 15:37:12.581299495 +0000 UTC m=+0.076729894 container init 23cd998f4a13da76dfd4ef8c2ebb0d756a985ce09d8d06dde51a863723513984 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate, CEPH_REF=squid, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:37:12.665 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 podman[83297]: 2026-03-09 15:37:12.584405039 +0000 UTC m=+0.079835438 container start 23cd998f4a13da76dfd4ef8c2ebb0d756a985ce09d8d06dde51a863723513984 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate, org.label-schema.schema-version=1.0, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2) 2026-03-09T15:37:12.665 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 podman[83297]: 2026-03-09 15:37:12.585365275 +0000 UTC m=+0.080795674 container attach 
23cd998f4a13da76dfd4ef8c2ebb0d756a985ce09d8d06dde51a863723513984 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, CEPH_REF=squid, OSD_FLAVOR=default, org.label-schema.build-date=20260223) 2026-03-09T15:37:12.665 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 podman[83297]: 2026-03-09 15:37:12.515094918 +0000 UTC m=+0.010525327 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:37:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:12 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:12 vm05 ceph-mon[86498]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:37:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:12 vm05 ceph-mon[86498]: osdmap e118: 8 total, 7 up, 8 in 2026-03-09T15:37:12.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:12 vm05 ceph-mon[86498]: osdmap e119: 8 total, 7 up, 8 in 2026-03-09T15:37:12.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:37:12 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:37:12] "GET /metrics HTTP/1.1" 200 38088 "" "Prometheus/2.51.0" 2026-03-09T15:37:12.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:12 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:12.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:12 vm05 ceph-mon[88323]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:37:12.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:12 vm05 ceph-mon[88323]: osdmap e118: 8 total, 7 up, 8 in 2026-03-09T15:37:12.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:12 vm05 ceph-mon[88323]: osdmap e119: 8 total, 7 up, 8 in 2026-03-09T15:37:13.063 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate[83310]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:13.063 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 bash[83297]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:13.063 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate[83310]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:13.063 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:12 vm09 bash[83297]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:13.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:12 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": 
"service status", "format": "json"}]: dispatch 2026-03-09T15:37:13.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:12 vm09 ceph-mon[77297]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:37:13.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:12 vm09 ceph-mon[77297]: osdmap e118: 8 total, 7 up, 8 in 2026-03-09T15:37:13.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:12 vm09 ceph-mon[77297]: osdmap e119: 8 total, 7 up, 8 in 2026-03-09T15:37:13.563 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate[83310]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:37:13.563 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 bash[83297]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:37:13.563 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate[83310]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:13.563 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 bash[83297]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:13.563 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate[83310]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:13.563 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 bash[83297]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:13.563 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate[83310]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T15:37:13.563 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 bash[83297]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T15:37:13.563 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate[83310]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f51cdf7b-eb2e-4871-837b-bf7364b2c07b/osd-block-8af33735-b39b-4b53-952c-68e544ffc047 --path /var/lib/ceph/osd/ceph-6 --no-mon-config 2026-03-09T15:37:13.563 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 bash[83297]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-f51cdf7b-eb2e-4871-837b-bf7364b2c07b/osd-block-8af33735-b39b-4b53-952c-68e544ffc047 --path /var/lib/ceph/osd/ceph-6 --no-mon-config 2026-03-09T15:37:13.782 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:13.520Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:13.782 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:13.520Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:13.782 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:13.521Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:13.782 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:13 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:13.521Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:14.067 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:13 vm09 ceph-mon[77297]: pgmap v115: 161 pgs: 13 stale+active+clean, 148 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate[83310]: Running command: /usr/bin/ln -snf /dev/ceph-f51cdf7b-eb2e-4871-837b-bf7364b2c07b/osd-block-8af33735-b39b-4b53-952c-68e544ffc047 /var/lib/ceph/osd/ceph-6/block 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 bash[83297]: Running command: /usr/bin/ln -snf /dev/ceph-f51cdf7b-eb2e-4871-837b-bf7364b2c07b/osd-block-8af33735-b39b-4b53-952c-68e544ffc047 /var/lib/ceph/osd/ceph-6/block 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate[83310]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 bash[83297]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-6/block 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate[83310]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 bash[83297]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate[83310]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 bash[83297]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-6 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate[83310]: --> ceph-volume lvm activate successful for osd ID: 6 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 bash[83297]: --> ceph-volume lvm activate successful for osd ID: 6 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 conmon[83310]: conmon 
23cd998f4a13da76dfd4 : Failed to open cgroups file: /sys/fs/cgroup/machine.slice/libpod-23cd998f4a13da76dfd4ef8c2ebb0d756a985ce09d8d06dde51a863723513984.scope/container/memory.events 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 podman[83297]: 2026-03-09 15:37:13.639790458 +0000 UTC m=+1.135220857 container died 23cd998f4a13da76dfd4ef8c2ebb0d756a985ce09d8d06dde51a863723513984 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True) 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 podman[83297]: 2026-03-09 15:37:13.657378184 +0000 UTC m=+1.152808573 container remove 23cd998f4a13da76dfd4ef8c2ebb0d756a985ce09d8d06dde51a863723513984 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-activate, org.label-schema.license=GPLv2, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , ceph=True, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 podman[83548]: 2026-03-09 15:37:13.747811421 +0000 UTC m=+0.015473707 container create 07d4d024aa5895d249670562770990734d969aff707878d2400238d08e9db236 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0) 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 podman[83548]: 2026-03-09 15:37:13.781256735 +0000 UTC m=+0.048919032 container init 07d4d024aa5895d249670562770990734d969aff707878d2400238d08e9db236 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS) 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 podman[83548]: 2026-03-09 15:37:13.785304303 +0000 UTC m=+0.052966589 container start 07d4d024aa5895d249670562770990734d969aff707878d2400238d08e9db236 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.41.3, ceph=True, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 bash[83548]: 07d4d024aa5895d249670562770990734d969aff707878d2400238d08e9db236 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 podman[83548]: 2026-03-09 15:37:13.741711754 +0000 UTC m=+0.009374050 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:37:14.067 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:13 vm09 systemd[1]: Started Ceph osd.6 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:37:14.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:13 vm05 ceph-mon[86498]: pgmap v115: 161 pgs: 13 stale+active+clean, 148 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:37:14.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:13 vm05 ceph-mon[88323]: pgmap v115: 161 pgs: 13 stale+active+clean, 148 active+clean; 457 KiB data, 225 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:37:14.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:37:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:37:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. 
This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.6\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.6\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.6\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:37:14.973 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:37:14.866+0000 7f3a52943740 -1 Falling back to public interface 2026-03-09T15:37:14.974 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:14 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:14.974 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:14 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:14.974 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:14 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:14.974 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:14 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:15.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:14 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:15.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:14 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:15.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:14 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:15.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:14 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:15.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:14 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:15.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:14 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:15.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:14 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:15.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:14 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:15 vm05 
ceph-mon[86498]: pgmap v117: 161 pgs: 20 active+undersized, 4 stale+active+clean, 10 active+undersized+degraded, 127 active+clean; 457 KiB data, 229 MiB used, 160 GiB / 160 GiB avail; 45/627 objects degraded (7.177%) 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[86498]: Health check failed: Degraded data redundancy: 45/627 objects degraded (7.177%), 10 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[88323]: pgmap v117: 161 pgs: 20 active+undersized, 4 stale+active+clean, 10 active+undersized+degraded, 127 active+clean; 457 KiB data, 229 MiB used, 160 GiB / 160 GiB avail; 45/627 objects degraded (7.177%) 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[88323]: Health check failed: Degraded data redundancy: 45/627 objects degraded (7.177%), 10 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:15 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:15 vm09 ceph-mon[77297]: pgmap v117: 161 pgs: 20 active+undersized, 4 stale+active+clean, 10 active+undersized+degraded, 127 active+clean; 457 KiB data, 229 MiB used, 160 GiB / 160 GiB avail; 45/627 objects degraded (7.177%) 2026-03-09T15:37:16.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:15 vm09 ceph-mon[77297]: Health check failed: Degraded data redundancy: 45/627 objects degraded (7.177%), 10 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:16.313 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:15 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:15 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:15 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:15 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:37:16.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:15 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.313 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:15 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:16.313 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:37:15.995+0000 7f3a52943740 -1 osd.6 0 read_superblock omap replica is missing. 2026-03-09T15:37:16.313 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:37:16.043+0000 7f3a52943740 -1 osd.6 117 log_to_monitors true 2026-03-09T15:37:17.062 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:37:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:37:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:37:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:16 vm09 ceph-mon[77297]: from='osd.6 [v2:192.168.123.109:6816/3776615657,v1:192.168.123.109:6817/3776615657]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T15:37:17.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:16 vm09 ceph-mon[77297]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 
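The repeated "Evaluating rule failed ... many-to-many matching not allowed" warnings from prometheus.a above come from alert expressions that join on `ceph_daemon` (or `instance`) with `group_left`: during this mixed-version window the scrape data contains two `ceph_osd_metadata` series for the same daemon (one with `instance="ceph_cluster"` plus a `cluster` label, one with `instance="192.168.123.109:9283"`), and likewise two `node_uname_info` series for vm09, so the right-hand side of the one-to-many match is no longer unique. The sketch below only illustrates that uniqueness rule with label sets copied from the log; it is not Prometheus code.

```python
from collections import Counter

def group_left_rhs_ok(rhs_series, match_labels):
    """Check the rule Prometheus enforces for `* on (<match_labels>) group_left`:
    each match-group key on the right-hand side must map to at most one series."""
    keys = [tuple(s.get(label) for label in match_labels) for s in rhs_series]
    dupes = [k for k, n in Counter(keys).items() if n > 1]
    return not dupes, dupes

# Two ceph_osd_metadata series for the same daemon, as reported in the log:
rhs = [
    {"ceph_daemon": "osd.6", "instance": "ceph_cluster",
     "cluster": "452f6a00-1bcc-11f1-a1ee-7f1a2af01dea", "hostname": "vm09"},
    {"ceph_daemon": "osd.6", "instance": "192.168.123.109:9283", "hostname": "vm09"},
]

ok, dupes = group_left_rhs_ok(rhs, ["ceph_daemon"])
print(ok, dupes)  # False, [('osd.6',)] -> "matching labels must be unique on one side"
```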
2026-03-09T15:37:17.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:16 vm05 ceph-mon[86498]: from='osd.6 [v2:192.168.123.109:6816/3776615657,v1:192.168.123.109:6817/3776615657]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T15:37:17.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:16 vm05 ceph-mon[86498]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T15:37:17.236 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:37:16 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:37:16.991+0000 7fef4b3b6640 -1 mgr.server reply reply (16) Device or resource busy unsafe to stop osd(s) at this time (19 PGs are or would become offline) 2026-03-09T15:37:17.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:16 vm05 ceph-mon[88323]: from='osd.6 [v2:192.168.123.109:6816/3776615657,v1:192.168.123.109:6817/3776615657]' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T15:37:17.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:16 vm05 ceph-mon[88323]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]: dispatch 2026-03-09T15:37:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: pgmap v118: 161 pgs: 36 active+undersized, 17 active+undersized+degraded, 108 active+clean; 457 KiB data, 229 MiB used, 160 GiB / 160 GiB avail; 73/627 objects degraded (11.643%) 2026-03-09T15:37:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-09T15:37:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: osdmap e120: 8 total, 7 up, 8 in 2026-03-09T15:37:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: from='osd.6 [v2:192.168.123.109:6816/3776615657,v1:192.168.123.109:6817/3776615657]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:37:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:37:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:18.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:18.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:37:18.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:18.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 
ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:37:18.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:18.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:18.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:18.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T15:37:18.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T15:37:18.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:17 vm09 ceph-mon[77297]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline) 2026-03-09T15:37:18.063 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:37:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:37:17.697+0000 7f3a49eed640 -1 osd.6 117 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: pgmap v118: 161 pgs: 36 active+undersized, 17 active+undersized+degraded, 108 active+clean; 457 KiB data, 229 MiB used, 160 GiB / 160 GiB avail; 73/627 objects degraded (11.643%) 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: osdmap e120: 8 total, 7 up, 8 in 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='osd.6 [v2:192.168.123.109:6816/3776615657,v1:192.168.123.109:6817/3776615657]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' 
entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[86498]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline) 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: pgmap v118: 161 pgs: 36 active+undersized, 17 active+undersized+degraded, 108 active+clean; 457 KiB data, 229 MiB used, 160 GiB / 160 GiB avail; 73/627 objects degraded (11.643%) 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["6"]}]': finished 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: osdmap e120: 8 total, 7 up, 8 in 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='osd.6 [v2:192.168.123.109:6816/3776615657,v1:192.168.123.109:6817/3776615657]' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='osd.6 ' entity='osd.6' cmd=[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='mgr.25004 
192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:18.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:18.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T15:37:18.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T15:37:18.237 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:17 vm05 ceph-mon[88323]: Upgrade: unsafe to stop osd(s) at this time (19 PGs are or would become offline) 2026-03-09T15:37:19.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:18 vm05 ceph-mon[86498]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:37:19.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:18 vm05 ceph-mon[86498]: osd.6 [v2:192.168.123.109:6816/3776615657,v1:192.168.123.109:6817/3776615657] boot 2026-03-09T15:37:19.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:18 vm05 ceph-mon[86498]: osdmap e121: 8 total, 8 up, 8 in 2026-03-09T15:37:19.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:18 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:37:19.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:18 vm05 ceph-mon[88323]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:37:19.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:18 vm05 ceph-mon[88323]: osd.6 [v2:192.168.123.109:6816/3776615657,v1:192.168.123.109:6817/3776615657] boot 2026-03-09T15:37:19.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:18 vm05 ceph-mon[88323]: osdmap e121: 8 total, 8 up, 8 in 2026-03-09T15:37:19.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:18 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:37:19.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:18 vm09 ceph-mon[77297]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:37:19.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:18 vm09 ceph-mon[77297]: osd.6 [v2:192.168.123.109:6816/3776615657,v1:192.168.123.109:6817/3776615657] boot 2026-03-09T15:37:19.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:18 vm09 ceph-mon[77297]: osdmap e121: 8 total, 8 up, 8 in 
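The upgrade gates each OSD restart on the monitors: the mgr repeatedly dispatches "osd ok-to-stop" for osd.7 and backs off while the answer is the EBUSY reply seen above ("(16) Device or resource busy unsafe to stop osd(s) at this time (19 PGs are or would become offline)"); once osd.6 is back up and the PGs recover, the same check succeeds and the restart proceeds. A simplified sketch of that gate, not cephadm's actual code, assuming the `ceph` CLI is on PATH and using a made-up retry policy:

```python
import subprocess
import time

def wait_until_ok_to_stop(osd_id: int, interval: float = 30.0, attempts: int = 20) -> bool:
    """Poll `ceph osd ok-to-stop <id>`; exit status 0 means the OSD can be stopped
    without making PGs unavailable, non-zero (e.g. EBUSY/16) means not yet."""
    for _ in range(attempts):
        result = subprocess.run(
            ["ceph", "osd", "ok-to-stop", str(osd_id)],
            capture_output=True, text=True,
        )
        if result.returncode == 0:
            return True
        time.sleep(interval)
    return False

if wait_until_ok_to_stop(7):
    print("safe to restart osd.7")
```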
2026-03-09T15:37:19.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:18 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 6}]: dispatch 2026-03-09T15:37:20.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:19 vm05 ceph-mon[86498]: OSD bench result of 27512.788609 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.6. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T15:37:20.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:19 vm05 ceph-mon[86498]: pgmap v121: 161 pgs: 3 peering, 34 active+undersized, 16 active+undersized+degraded, 108 active+clean; 457 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 64/627 objects degraded (10.207%) 2026-03-09T15:37:20.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:19 vm05 ceph-mon[86498]: osdmap e122: 8 total, 8 up, 8 in 2026-03-09T15:37:20.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:19 vm05 ceph-mon[88323]: OSD bench result of 27512.788609 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.6. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 2026-03-09T15:37:20.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:19 vm05 ceph-mon[88323]: pgmap v121: 161 pgs: 3 peering, 34 active+undersized, 16 active+undersized+degraded, 108 active+clean; 457 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 64/627 objects degraded (10.207%) 2026-03-09T15:37:20.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:19 vm05 ceph-mon[88323]: osdmap e122: 8 total, 8 up, 8 in 2026-03-09T15:37:20.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:19 vm09 ceph-mon[77297]: OSD bench result of 27512.788609 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.6. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
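The mClock message above means the automatic OSD bench measured roughly 27.5k IOPS, far outside the 50-500 IOPS window accepted for an hdd-class device, so the scheduler keeps the existing 315 IOPS capacity for osd.6. As the message itself recommends, the usual follow-up is to benchmark the device independently (e.g. with fio) and pin the capacity via `osd_mclock_max_capacity_iops_[hdd|ssd]`. A hedged sketch of that override, assuming the `ceph` CLI is available; the 315 value is just the current figure from the log and should be replaced with a measured number:

```python
import subprocess

def set_osd_iops_capacity(osd_id: int, iops: float, device_class: str = "hdd") -> None:
    """Pin the mClock IOPS capacity for one OSD after measuring it externally."""
    option = f"osd_mclock_max_capacity_iops_{device_class}"
    subprocess.run(
        ["ceph", "config", "set", f"osd.{osd_id}", option, str(iops)],
        check=True,
    )

# Example: keep osd.6 at the 315 IOPS the log reports as unchanged.
set_osd_iops_capacity(6, 315.0)
```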
2026-03-09T15:37:20.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:19 vm09 ceph-mon[77297]: pgmap v121: 161 pgs: 3 peering, 34 active+undersized, 16 active+undersized+degraded, 108 active+clean; 457 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 64/627 objects degraded (10.207%) 2026-03-09T15:37:20.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:19 vm09 ceph-mon[77297]: osdmap e122: 8 total, 8 up, 8 in 2026-03-09T15:37:21.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:20 vm05 ceph-mon[86498]: Health check update: Degraded data redundancy: 64/627 objects degraded (10.207%), 16 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:21.248 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:20 vm05 ceph-mon[88323]: Health check update: Degraded data redundancy: 64/627 objects degraded (10.207%), 16 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:21.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:20 vm09 ceph-mon[77297]: Health check update: Degraded data redundancy: 64/627 objects degraded (10.207%), 16 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:22.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:21 vm05 ceph-mon[86498]: pgmap v123: 161 pgs: 12 peering, 22 active+undersized, 10 active+undersized+degraded, 117 active+clean; 457 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 39/627 objects degraded (6.220%) 2026-03-09T15:37:22.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:21 vm05 ceph-mon[88323]: pgmap v123: 161 pgs: 12 peering, 22 active+undersized, 10 active+undersized+degraded, 117 active+clean; 457 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 39/627 objects degraded (6.220%) 2026-03-09T15:37:22.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:21 vm09 ceph-mon[77297]: pgmap v123: 161 pgs: 12 peering, 22 active+undersized, 10 active+undersized+degraded, 117 active+clean; 457 KiB data, 247 MiB used, 160 GiB / 160 GiB avail; 39/627 objects degraded (6.220%) 2026-03-09T15:37:22.972 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:37:22 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:37:22] "GET /metrics HTTP/1.1" 200 38093 "" "Prometheus/2.51.0" 2026-03-09T15:37:23.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:22 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:23.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:22 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:23.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:22 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:23.521Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:23.521Z 
caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:23.523Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:23.985 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:23 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:23.523Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:24.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:23 vm05 ceph-mon[86498]: pgmap v124: 161 pgs: 12 peering, 149 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s 2026-03-09T15:37:24.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:23 vm05 ceph-mon[86498]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 39/627 objects degraded (6.220%), 10 pgs degraded) 2026-03-09T15:37:24.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:23 vm05 ceph-mon[86498]: Cluster is now healthy 2026-03-09T15:37:24.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:23 vm05 ceph-mon[88323]: pgmap v124: 161 pgs: 12 peering, 149 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s 2026-03-09T15:37:24.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:23 vm05 ceph-mon[88323]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 39/627 objects degraded (6.220%), 10 pgs degraded) 2026-03-09T15:37:24.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:23 vm05 ceph-mon[88323]: Cluster is now healthy 2026-03-09T15:37:24.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:23 vm09 ceph-mon[77297]: pgmap v124: 161 pgs: 12 peering, 149 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 682 B/s rd, 0 op/s 2026-03-09T15:37:24.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:23 vm09 ceph-mon[77297]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 39/627 objects degraded (6.220%), 10 pgs degraded) 2026-03-09T15:37:24.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:23 vm09 ceph-mon[77297]: Cluster is now healthy 2026-03-09T15:37:24.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:37:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:37:24.147Z caller=alerting.go:391 level=warn component="rule manager" alert="unsupported value type" msg="Expanding alert template failed" err="error executing template __alert_CephOSDDown: template: __alert_CephOSDDown:1:358: executing \"__alert_CephOSDDown\" at : error calling query: found duplicate series for the match group {ceph_daemon=\"osd.6\"} on the right 
hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.6\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.6\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}];many-to-many matching not allowed: matching labels must be unique on one side" data="unsupported value type" 2026-03-09T15:37:24.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:37:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:37:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.6\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.6\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.6\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:37:26.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:25 vm09 ceph-mon[77297]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 533 B/s rd, 0 op/s 2026-03-09T15:37:26.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:25 vm05 ceph-mon[86498]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 533 B/s rd, 0 op/s 2026-03-09T15:37:26.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:25 vm05 ceph-mon[88323]: pgmap v125: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 533 B/s rd, 0 op/s 
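Alongside the rule-evaluation warnings, alertmanager.a keeps retrying the ceph-dashboard webhook and eventually cancels because `host.containers.internal` does not resolve from inside the container ("lookup host.containers.internal on 192.168.123.1:53: no such host"), so dashboard notifications are dropped even while the cluster reports healthy. A quick resolution check along these lines, with the hostname and port taken from the log, reproduces the failure outside alertmanager:

```python
import socket

def receiver_resolves(host: str, port: int) -> bool:
    """Return True if the webhook target resolves to at least one address."""
    try:
        return bool(socket.getaddrinfo(host, port, type=socket.SOCK_STREAM))
    except socket.gaierror:
        return False

print(receiver_resolves("host.containers.internal", 8443))  # False on these VMs
```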
2026-03-09T15:37:27.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:37:26 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:37:26.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:37:28.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:27 vm09 ceph-mon[77297]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T15:37:28.482 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:37:28.483 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:27 vm05 ceph-mon[88323]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T15:37:28.483 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:27 vm05 ceph-mon[86498]: pgmap v126: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 1.0 KiB/s rd, 1 op/s 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (3m) 79s ago 8m 24.9M - 0.25.0 c8568f914cd2 93224b6bb99a 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (3m) 13s ago 8m 49.0M - dad864ee21e9 6a58314a043e 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 running (3m) 79s ago 8m 50.4M - 3.5 e1d6a67b021e 3a5f40e66729 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443,9283,8765 running (3m) 13s ago 10m 490M - 19.2.3-678-ge911bdeb 654f31e6858e dd2d7e10f3aa 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:8443,9283,8765 running (3m) 79s ago 11m 554M - 19.2.3-678-ge911bdeb 654f31e6858e db0211ba824d 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (3m) 79s ago 11m 50.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3fa7c78f8952 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (2m) 13s ago 10m 42.7M 2048M 19.2.3-678-ge911bdeb 
654f31e6858e 60013cd0d65b 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (2m) 79s ago 10m 40.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c4256ae4b3f9 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (3m) 79s ago 8m 9763k - 1.7.0 72c9c2088986 e730a028339f 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (3m) 13s ago 8m 9592k - 1.7.0 72c9c2088986 a360ac0679f4 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (2m) 79s ago 10m 70.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 343a65bb3f01 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (2m) 79s ago 9m 71.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 379185d73d4e 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (103s) 79s ago 9m 66.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 101cc91253b5 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (80s) 79s ago 9m 13.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 7796c013c7ba 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (58s) 13s ago 9m 48.1M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 7111150665fe 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (36s) 13s ago 9m 71.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 1512c99eec2b 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (15s) 13s ago 9m 15.4M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 07d4d024aa58 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (9m) 13s ago 9m 57.1M 4096M 17.2.0 e1d6a67b021e 3cc4727e5f07 2026-03-09T15:37:28.887 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (3m) 13s ago 8m 44.2M - 2.51.0 1d3b7f56885b 737f11649a72 2026-03-09T15:37:28.888 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (8m) 79s ago 8m 93.0M - 17.2.0 e1d6a67b021e 1d93c894d675 2026-03-09T15:37:28.888 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (8m) 13s ago 8m 92.4M - 17.2.0 e1d6a67b021e 5c4755297ad0 2026-03-09T15:37:28.888 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (8m) 79s ago 8m 94.3M - 17.2.0 e1d6a67b021e fe5812c6c74c 2026-03-09T15:37:28.888 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (8m) 13s ago 8m 91.5M - 17.2.0 e1d6a67b021e 7a967179b651 2026-03-09T15:37:29.131 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: "mon": { 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: "mgr": { 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: "osd": { 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 1, 2026-03-09T15:37:29.132 
INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 7 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": { 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 4 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: "overall": { 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)": 5, 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 12 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:37:29.132 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:37:29.332 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:37:29.332 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T15:37:29.332 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true, 2026-03-09T15:37:29.332 INFO:teuthology.orchestra.run.vm05.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-09T15:37:29.332 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [ 2026-03-09T15:37:29.332 INFO:teuthology.orchestra.run.vm05.stdout: "mgr", 2026-03-09T15:37:29.332 INFO:teuthology.orchestra.run.vm05.stdout: "mon" 2026-03-09T15:37:29.332 INFO:teuthology.orchestra.run.vm05.stdout: ], 2026-03-09T15:37:29.332 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "12/23 daemons upgraded", 2026-03-09T15:37:29.332 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Currently upgrading osd daemons", 2026-03-09T15:37:29.332 INFO:teuthology.orchestra.run.vm05.stdout: "is_paused": false 2026-03-09T15:37:29.332 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:37:29.567 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_OK 2026-03-09T15:37:30.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:30 vm09 ceph-mon[77297]: from='client.34305 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:37:30.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:30 vm09 ceph-mon[77297]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-09T15:37:30.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:30 vm09 ceph-mon[77297]: from='client.44243 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:37:30.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:30 vm09 ceph-mon[77297]: from='client.34314 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:37:30.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:30 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/3986644911' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:30.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:30 vm09 ceph-mon[77297]: from='client.? 
192.168.123.105:0/2279337623' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:37:30.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:30 vm05 ceph-mon[88323]: from='client.34305 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:37:30.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:30 vm05 ceph-mon[88323]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-09T15:37:30.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:30 vm05 ceph-mon[88323]: from='client.44243 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:37:30.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:30 vm05 ceph-mon[88323]: from='client.34314 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:37:30.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:30 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/3986644911' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:30.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:30 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/2279337623' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:37:30.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:30 vm05 ceph-mon[86498]: from='client.34305 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:37:30.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:30 vm05 ceph-mon[86498]: pgmap v127: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 921 B/s rd, 0 op/s 2026-03-09T15:37:30.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:30 vm05 ceph-mon[86498]: from='client.44243 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:37:30.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:30 vm05 ceph-mon[86498]: from='client.34314 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:37:30.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:30 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/3986644911' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:30.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:30 vm05 ceph-mon[86498]: from='client.? 
192.168.123.105:0/2279337623' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:37:31.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:31 vm09 ceph-mon[77297]: from='client.44269 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:37:31.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:31 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:31.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:31 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:37:31.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:31 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:31.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:31 vm05 ceph-mon[86498]: from='client.44269 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:37:31.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:31 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:31.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:31 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:37:31.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:31 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:31.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:31 vm05 ceph-mon[88323]: from='client.44269 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:37:31.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:31 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:31.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:31 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:37:31.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:31 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:32.273 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:32 vm09 ceph-mon[77297]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 795 B/s rd, 0 op/s 2026-03-09T15:37:32.273 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:32 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T15:37:32.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:32 vm05 ceph-mon[86498]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 795 B/s rd, 0 op/s 2026-03-09T15:37:32.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:32 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T15:37:32.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:32 vm05 ceph-mon[88323]: pgmap v128: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 795 B/s rd, 0 op/s 2026-03-09T15:37:32.486 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:32 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T15:37:32.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:37:32 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:37:32] "GET /metrics HTTP/1.1" 200 38103 "" "Prometheus/2.51.0" 2026-03-09T15:37:33.111 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:33 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:33.111 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:33 vm09 ceph-mon[77297]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T15:37:33.111 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:33 vm09 ceph-mon[77297]: Upgrade: osd.7 is safe to restart 2026-03-09T15:37:33.111 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:33 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:33.111 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:33 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T15:37:33.111 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:33 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:33.429 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:33 vm09 systemd[1]: Stopping Ceph osd.7 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:37:33.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:33 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:33.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:33 vm05 ceph-mon[86498]: from='mon.0 -' entity='mon.' cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T15:37:33.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:33 vm05 ceph-mon[86498]: Upgrade: osd.7 is safe to restart 2026-03-09T15:37:33.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:33.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T15:37:33.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:33 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:33.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:33 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:33.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:33 vm05 ceph-mon[88323]: from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "osd ok-to-stop", "ids": ["7"], "max": 16}]: dispatch 2026-03-09T15:37:33.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:33 vm05 ceph-mon[88323]: Upgrade: osd.7 is safe to restart 2026-03-09T15:37:33.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:33 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:33.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:33 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "osd.7"}]: dispatch 2026-03-09T15:37:33.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:33 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:33.812 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:33 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[60955]: 2026-03-09T15:37:33.425+0000 7fd5fd74c700 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.7 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:37:33.812 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:33 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[60955]: 2026-03-09T15:37:33.425+0000 7fd5fd74c700 -1 osd.7 122 *** Got signal Terminated *** 2026-03-09T15:37:33.812 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:33 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[60955]: 2026-03-09T15:37:33.425+0000 7fd5fd74c700 -1 osd.7 122 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:37:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:33.522Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:33.523Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:33.523Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:33.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:33.523Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:34.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:37:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:37:34.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.7\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.7\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.7\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:37:34.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:34 vm09 ceph-mon[77297]: Upgrade: Updating osd.7 2026-03-09T15:37:34.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:34 vm09 ceph-mon[77297]: Deploying daemon osd.7 on vm09 2026-03-09T15:37:34.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:34 vm09 ceph-mon[77297]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:37:34.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:34 vm09 ceph-mon[77297]: osd.7 marked itself down and dead 2026-03-09T15:37:34.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:34 vm05 ceph-mon[86498]: Upgrade: Updating osd.7 2026-03-09T15:37:34.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:34 vm05 ceph-mon[86498]: Deploying daemon osd.7 on vm09 2026-03-09T15:37:34.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:34 vm05 ceph-mon[86498]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:37:34.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:34 vm05 ceph-mon[86498]: osd.7 marked itself down and dead 2026-03-09T15:37:34.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:34 vm05 ceph-mon[88323]: Upgrade: Updating osd.7 2026-03-09T15:37:34.486 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:34 vm05 ceph-mon[88323]: Deploying daemon osd.7 on vm09 2026-03-09T15:37:34.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:34 vm05 ceph-mon[88323]: pgmap v129: 161 pgs: 161 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:37:34.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:34 vm05 ceph-mon[88323]: osd.7 marked itself down and dead 2026-03-09T15:37:34.707 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 podman[85122]: 2026-03-09 15:37:34.383593811 +0000 UTC m=+0.980940196 container died 3cc4727e5f07a068ca283cf10da4e1158d7b782e43d9812522b035837959ec29 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7, vendor=Red Hat, Inc., architecture=x86_64, io.openshift.expose-services=, version=8, maintainer=Guillaume Abrioux , release=754, vcs-type=git, distribution-scope=public, GIT_REPO=https://github.com/ceph/ceph-container.git, com.redhat.component=centos-stream-container, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, ceph=True, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, build-date=2022-05-03T08:36:31.336870, RELEASE=HEAD, io.k8s.display-name=CentOS Stream 8, GIT_BRANCH=HEAD, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, io.buildah.version=1.19.8, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly., name=centos-stream, GIT_CLEAN=True, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, CEPH_POINT_RELEASE=-17.2.0, GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, io.openshift.tags=base centos centos-stream) 2026-03-09T15:37:34.707 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 podman[85122]: 2026-03-09 15:37:34.407377375 +0000 UTC m=+1.004723761 container remove 3cc4727e5f07a068ca283cf10da4e1158d7b782e43d9812522b035837959ec29 (image=quay.io/ceph/ceph@sha256:12a0a4f43413fd97a14a3d47a3451b2d2df50020835bb93db666209f3f77617a, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, release=754, summary=Provides a CentOS Stream container based on the Red Hat Universal Base Image, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., vendor=Red Hat, Inc., GIT_COMMIT=b613db0f44179c0940781c1c7fe04e1acb7093ac, com.redhat.component=centos-stream-container, build-date=2022-05-03T08:36:31.336870, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.6-754, vcs-ref=f1ee6e37554363ec55e0035aba1a693d3627fdeb, GIT_CLEAN=True, io.openshift.tags=base centos centos-stream, GIT_REPO=https://github.com/ceph/ceph-container.git, name=centos-stream, com.redhat.license_terms=https://centos.org/legal/licensing-policy/, vcs-type=git, description=CentOS Stream is a continuously delivered distro that tracks just ahead of Red Hat Enterprise Linux development. This image takes the Red Hat UBI and layers on content from CentOS Stream, version=8, CEPH_POINT_RELEASE=-17.2.0, RELEASE=HEAD, io.k8s.display-name=CentOS Stream 8, maintainer=Guillaume Abrioux , ceph=True, io.buildah.version=1.19.8, GIT_BRANCH=HEAD, architecture=x86_64, distribution-scope=public, io.openshift.expose-services=) 2026-03-09T15:37:34.707 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 bash[85122]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7 2026-03-09T15:37:34.707 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 podman[85191]: 2026-03-09 15:37:34.538188944 +0000 UTC m=+0.016159228 container create 2ccf7daf28a668a0a78933ebec896e360b0fc60e2946f083a1b28a0f5f5c3c8d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-deactivate, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_REF=squid, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T15:37:34.707 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 podman[85191]: 2026-03-09 15:37:34.578732284 +0000 UTC m=+0.056702578 container init 2ccf7daf28a668a0a78933ebec896e360b0fc60e2946f083a1b28a0f5f5c3c8d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.build-date=20260223, io.buildah.version=1.41.3) 2026-03-09T15:37:34.707 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 podman[85191]: 2026-03-09 15:37:34.581592817 +0000 UTC m=+0.059563101 container start 2ccf7daf28a668a0a78933ebec896e360b0fc60e2946f083a1b28a0f5f5c3c8d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-deactivate, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, CEPH_REF=squid, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:37:34.707 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 podman[85191]: 2026-03-09 15:37:34.583995478 +0000 UTC m=+0.061965751 container attach 2ccf7daf28a668a0a78933ebec896e360b0fc60e2946f083a1b28a0f5f5c3c8d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-deactivate, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3) 2026-03-09T15:37:34.707 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 podman[85191]: 2026-03-09 15:37:34.531371916 +0000 UTC m=+0.009342210 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:37:34.707 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 podman[85191]: 2026-03-09 15:37:34.704939698 +0000 UTC m=+0.182909982 container died 2ccf7daf28a668a0a78933ebec896e360b0fc60e2946f083a1b28a0f5f5c3c8d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-deactivate, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_REF=squid) 2026-03-09T15:37:35.018 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 podman[85191]: 2026-03-09 15:37:34.723468814 +0000 UTC m=+0.201439098 container remove 2ccf7daf28a668a0a78933ebec896e360b0fc60e2946f083a1b28a0f5f5c3c8d (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-deactivate, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, 
io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:37:35.018 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.7.service: Deactivated successfully. 2026-03-09T15:37:35.018 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.7.service: Unit process 85202 (conmon) remains running after unit stopped. 2026-03-09T15:37:35.018 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.7.service: Unit process 85210 (podman) remains running after unit stopped. 2026-03-09T15:37:35.018 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 systemd[1]: Stopped Ceph osd.7 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:37:35.018 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.7.service: Consumed 16.233s CPU time, 165.2M memory peak. 2026-03-09T15:37:35.018 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:34 vm09 systemd[1]: Starting Ceph osd.7 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:37:35.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:35 vm09 ceph-mon[77297]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:37:35.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:35 vm09 ceph-mon[77297]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-09T15:37:35.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:35 vm09 ceph-mon[77297]: osdmap e123: 8 total, 7 up, 8 in 2026-03-09T15:37:35.313 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 podman[85293]: 2026-03-09 15:37:35.016966229 +0000 UTC m=+0.018892060 container create b63befd0f3ed3b8144a7da7305b0e36f6e88d83045d8106c6c01d466f0bb20af (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, OSD_FLAVOR=default, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:37:35.313 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 podman[85293]: 2026-03-09 15:37:35.060912703 +0000 UTC m=+0.062838534 container init b63befd0f3ed3b8144a7da7305b0e36f6e88d83045d8106c6c01d466f0bb20af (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate, org.label-schema.vendor=CentOS, 
org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, ceph=True, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:37:35.313 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 podman[85293]: 2026-03-09 15:37:35.063790297 +0000 UTC m=+0.065716128 container start b63befd0f3ed3b8144a7da7305b0e36f6e88d83045d8106c6c01d466f0bb20af (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.schema-version=1.0) 2026-03-09T15:37:35.313 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 podman[85293]: 2026-03-09 15:37:35.068374544 +0000 UTC m=+0.070300375 container attach b63befd0f3ed3b8144a7da7305b0e36f6e88d83045d8106c6c01d466f0bb20af (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, org.label-schema.build-date=20260223, CEPH_REF=squid) 2026-03-09T15:37:35.313 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 podman[85293]: 2026-03-09 15:37:35.009895544 +0000 UTC m=+0.011821375 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:37:35.313 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate[85303]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:35.313 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 bash[85293]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:35.313 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate[85303]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:35.313 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 
vm09 bash[85293]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:35.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:35 vm05 ceph-mon[86498]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:37:35.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:35 vm05 ceph-mon[86498]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-09T15:37:35.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:35 vm05 ceph-mon[86498]: osdmap e123: 8 total, 7 up, 8 in 2026-03-09T15:37:35.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:35 vm05 ceph-mon[88323]: Health check failed: 1 osds down (OSD_DOWN) 2026-03-09T15:37:35.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:35 vm05 ceph-mon[88323]: Health check failed: all OSDs are running squid or later but require_osd_release < squid (OSD_UPGRADE_FINISHED) 2026-03-09T15:37:35.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:35 vm05 ceph-mon[88323]: osdmap e123: 8 total, 7 up, 8 in 2026-03-09T15:37:35.956 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate[85303]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:37:35.956 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 bash[85293]: --> Failed to activate via raw: did not find any matching OSD to activate 2026-03-09T15:37:35.956 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 bash[85293]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:35.956 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate[85303]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:35.956 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate[85303]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:35.956 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 bash[85293]: Running command: /usr/bin/ceph-authtool --gen-print-key 2026-03-09T15:37:35.956 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate[85303]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-09T15:37:35.956 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 bash[85293]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-09T15:37:35.956 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate[85303]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-fb5f1858-c98d-40a1-9735-6d9da10f7ee9/osd-block-74ebc967-e985-4f5a-a0f8-c8493e041bc8 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-09T15:37:35.956 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 bash[85293]: Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-fb5f1858-c98d-40a1-9735-6d9da10f7ee9/osd-block-74ebc967-e985-4f5a-a0f8-c8493e041bc8 --path /var/lib/ceph/osd/ceph-7 --no-mon-config 2026-03-09T15:37:36.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:36 vm09 ceph-mon[77297]: pgmap v131: 161 pgs: 22 stale+active+clean, 139 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:37:36.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 
15:37:36 vm09 ceph-mon[77297]: osdmap e124: 8 total, 7 up, 8 in 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate[85303]: Running command: /usr/bin/ln -snf /dev/ceph-fb5f1858-c98d-40a1-9735-6d9da10f7ee9/osd-block-74ebc967-e985-4f5a-a0f8-c8493e041bc8 /var/lib/ceph/osd/ceph-7/block 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 bash[85293]: Running command: /usr/bin/ln -snf /dev/ceph-fb5f1858-c98d-40a1-9735-6d9da10f7ee9/osd-block-74ebc967-e985-4f5a-a0f8-c8493e041bc8 /var/lib/ceph/osd/ceph-7/block 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate[85303]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 bash[85293]: Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-7/block 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate[85303]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 bash[85293]: Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate[85303]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 bash[85293]: Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-7 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate[85303]: --> ceph-volume lvm activate successful for osd ID: 7 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 bash[85293]: --> ceph-volume lvm activate successful for osd ID: 7 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:35 vm09 podman[85293]: 2026-03-09 15:37:35.986173437 +0000 UTC m=+0.988099268 container died b63befd0f3ed3b8144a7da7305b0e36f6e88d83045d8106c6c01d466f0bb20af (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate, org.label-schema.license=GPLv2, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, OSD_FLAVOR=default, io.buildah.version=1.41.3) 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:36 vm09 podman[85293]: 2026-03-09 15:37:36.001930798 +0000 UTC m=+1.003856619 container remove b63befd0f3ed3b8144a7da7305b0e36f6e88d83045d8106c6c01d466f0bb20af (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-activate, org.label-schema.schema-version=1.0, 
OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS) 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:36 vm09 podman[85535]: 2026-03-09 15:37:36.105778298 +0000 UTC m=+0.020048156 container create 94ef91f7c8456a8f97c95d60b58a09e25dcf343c22e81d170ea5bf48fafe60d9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7, CEPH_REF=squid, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:36 vm09 podman[85535]: 2026-03-09 15:37:36.13775585 +0000 UTC m=+0.052025708 container init 94ef91f7c8456a8f97c95d60b58a09e25dcf343c22e81d170ea5bf48fafe60d9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, ceph=True, org.label-schema.license=GPLv2) 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:36 vm09 podman[85535]: 2026-03-09 15:37:36.143788341 +0000 UTC m=+0.058058199 container start 94ef91f7c8456a8f97c95d60b58a09e25dcf343c22e81d170ea5bf48fafe60d9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, 
org.label-schema.license=GPLv2) 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:36 vm09 bash[85535]: 94ef91f7c8456a8f97c95d60b58a09e25dcf343c22e81d170ea5bf48fafe60d9 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:36 vm09 podman[85535]: 2026-03-09 15:37:36.096981184 +0000 UTC m=+0.011251052 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:37:36.312 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:36 vm09 systemd[1]: Started Ceph osd.7 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:37:36.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:36 vm05 ceph-mon[86498]: pgmap v131: 161 pgs: 22 stale+active+clean, 139 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:37:36.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:36 vm05 ceph-mon[86498]: osdmap e124: 8 total, 7 up, 8 in 2026-03-09T15:37:36.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:36 vm05 ceph-mon[88323]: pgmap v131: 161 pgs: 22 stale+active+clean, 139 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:37:36.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:36 vm05 ceph-mon[88323]: osdmap e124: 8 total, 7 up, 8 in 2026-03-09T15:37:37.223 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:37:36.963+0000 7fb38e423740 -1 Falling back to public interface 2026-03-09T15:37:37.225 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:37 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:37.225 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:37 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:37.225 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:37 vm09 ceph-mon[77297]: pgmap v133: 161 pgs: 15 active+undersized, 18 stale+active+clean, 11 active+undersized+degraded, 117 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 29/627 objects degraded (4.625%) 2026-03-09T15:37:37.225 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:37:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:37:36.949Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", 
instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:37:37.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:37 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:37.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:37 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:37.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:37 vm05 ceph-mon[86498]: pgmap v133: 161 pgs: 15 active+undersized, 18 stale+active+clean, 11 active+undersized+degraded, 117 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 29/627 objects degraded (4.625%) 2026-03-09T15:37:37.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:37 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:37.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:37 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:37.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:37 vm05 ceph-mon[88323]: pgmap v133: 161 pgs: 15 active+undersized, 18 stale+active+clean, 11 active+undersized+degraded, 117 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 29/627 objects degraded (4.625%) 2026-03-09T15:37:38.107 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:37 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:37:37.815+0000 7fb38e423740 -1 osd.7 0 read_superblock omap replica is missing. 2026-03-09T15:37:38.107 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:37 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:37:37.856+0000 7fb38e423740 -1 osd.7 122 log_to_monitors true 2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[86498]: Health check failed: Degraded data redundancy: 29/627 objects degraded (4.625%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[86498]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[86498]: from='osd.7 [v2:192.168.123.109:6824/968250638,v1:192.168.123.109:6825/968250638]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[88323]: Health check failed: Degraded data redundancy: 29/627 objects degraded (4.625%), 11 pgs degraded (PG_DEGRADED) 
2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[88323]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[88323]: from='osd.7 [v2:192.168.123.109:6824/968250638,v1:192.168.123.109:6825/968250638]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:38.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:38 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:38.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:38 vm09 ceph-mon[77297]: Health check failed: Degraded data redundancy: 29/627 objects degraded (4.625%), 11 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:38.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:38 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:38.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:38 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:38.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:38 vm09 ceph-mon[77297]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T15:37:38.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:38 vm09 ceph-mon[77297]: from='osd.7 [v2:192.168.123.109:6824/968250638,v1:192.168.123.109:6825/968250638]' entity='osd.7' cmd=[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]: dispatch 2026-03-09T15:37:38.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:38 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:38.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:38 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:39.812 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:37:39 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:37:39.411+0000 7fb3859cd640 -1 osd.7 122 set_numa_affinity unable to identify public interface '' numa node: (2) No such file or directory 2026-03-09T15:37:39.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-09T15:37:39.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: osdmap e125: 8 total, 7 up, 8 in 2026-03-09T15:37:39.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:37:39.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='osd.7 
[v2:192.168.123.109:6824/968250638,v1:192.168.123.109:6825/968250638]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:37:39.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: pgmap v135: 161 pgs: 27 active+undersized, 13 stale+active+clean, 14 active+undersized+degraded, 107 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 33/627 objects degraded (5.263%) 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config 
rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-09T15:37:39.813 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:39 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 
ceph-mon[86498]: osdmap e125: 8 total, 7 up, 8 in 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='osd.7 [v2:192.168.123.109:6824/968250638,v1:192.168.123.109:6825/968250638]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: pgmap v135: 161 pgs: 27 active+undersized, 13 stale+active+clean, 14 active+undersized+degraded, 107 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 33/627 objects degraded (5.263%) 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 
2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-09T15:37:39.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[86498]: 
from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "hdd", "ids": ["7"]}]': finished 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: osdmap e125: 8 total, 7 up, 8 in 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='osd.7 ' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='osd.7 [v2:192.168.123.109:6824/968250638,v1:192.168.123.109:6825/968250638]' entity='osd.7' cmd=[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm09", "root=default"]}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: pgmap v135: 161 pgs: 27 active+undersized, 13 stale+active+clean, 14 active+undersized+degraded, 107 active+clean; 457 KiB data, 248 MiB used, 160 GiB / 160 GiB avail; 33/627 objects degraded (5.263%) 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 
15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.0"}]': finished 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]: dispatch 2026-03-09T15:37:39.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.1"}]': finished 2026-03-09T15:37:39.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]: dispatch 2026-03-09T15:37:39.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.2"}]': finished 2026-03-09T15:37:39.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]: dispatch 2026-03-09T15:37:39.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.3"}]': finished 2026-03-09T15:37:39.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]: dispatch 2026-03-09T15:37:39.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.4"}]': finished 2026-03-09T15:37:39.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]: dispatch 2026-03-09T15:37:39.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.5"}]': finished 2026-03-09T15:37:39.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]: dispatch 2026-03-09T15:37:39.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.6"}]': finished 2026-03-09T15:37:39.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": 
"config rm", "name": "container_image", "who": "osd.7"}]: dispatch 2026-03-09T15:37:39.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "osd.7"}]': finished 2026-03-09T15:37:39.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:39 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd require-osd-release", "release": "squid"}]: dispatch 2026-03-09T15:37:40.762 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[86498]: Upgrade: Setting container_image for all osd 2026-03-09T15:37:40.762 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[86498]: Upgrade: Setting require_osd_release to 19 squid 2026-03-09T15:37:40.762 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[86498]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:37:40.762 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[86498]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-09T15:37:40.762 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-09T15:37:40.762 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[86498]: osd.7 [v2:192.168.123.109:6824/968250638,v1:192.168.123.109:6825/968250638] boot 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[86498]: osdmap e126: 8 total, 8 up, 8 in 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[88323]: Upgrade: Setting container_image for all osd 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[88323]: Upgrade: Setting require_osd_release to 19 squid 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[88323]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[88323]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[88323]: osd.7 [v2:192.168.123.109:6824/968250638,v1:192.168.123.109:6825/968250638] boot 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[88323]: osdmap e126: 8 
total, 8 up, 8 in 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:40.763 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:40 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:40.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:40 vm09 ceph-mon[77297]: Upgrade: Setting container_image for all osd 2026-03-09T15:37:40.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:40 vm09 ceph-mon[77297]: Upgrade: Setting require_osd_release to 19 squid 2026-03-09T15:37:40.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:40 vm09 ceph-mon[77297]: Health check cleared: OSD_DOWN (was: 1 osds down) 2026-03-09T15:37:40.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:40 vm09 ceph-mon[77297]: Health check cleared: OSD_UPGRADE_FINISHED (was: all OSDs are running squid or later but require_osd_release < squid) 2026-03-09T15:37:40.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:40 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "osd require-osd-release", "release": "squid"}]': finished 2026-03-09T15:37:40.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:40 vm09 ceph-mon[77297]: osd.7 [v2:192.168.123.109:6824/968250638,v1:192.168.123.109:6825/968250638] boot 2026-03-09T15:37:40.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:40 vm09 ceph-mon[77297]: osdmap e126: 8 total, 8 up, 8 in 2026-03-09T15:37:40.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:40 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd metadata", "id": 7}]: dispatch 2026-03-09T15:37:40.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:40 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:40.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:40 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:41.896 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[86498]: OSD bench result of 31090.609457 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.7. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
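The entries above show the orchestrator setting require_osd_release to squid and clearing OSD_UPGRADE_FINISHED once every OSD reports the new release. A minimal sketch of how this state could be double-checked from a cephadm shell on a mon host (assuming admin access; epoch numbers and daemon counts will differ):

  # confirm the OSD map now requires the squid release
  ceph osd dump | grep require_osd_release
  # all daemons should converge on a single version once the upgrade completes
  ceph versions | jq '.overall'
  # overall upgrade progress as reported by the orchestrator
  ceph orch upgrade status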
2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[86498]: Upgrade: Setting container_image for all mds 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[86498]: pgmap v137: 161 pgs: 41 active+undersized, 28 active+undersized+degraded, 92 active+clean; 457 KiB data, 265 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[86498]: Upgrade: Updating rgw.foo.vm05.tiuqos (1/4) 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[86498]: Deploying daemon rgw.foo.vm05.tiuqos on vm05 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[86498]: osdmap e127: 8 total, 8 up, 8 in 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[88323]: OSD bench result of 31090.609457 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.7. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[88323]: Upgrade: Setting container_image for all mds 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[88323]: pgmap v137: 161 pgs: 41 active+undersized, 28 active+undersized+degraded, 92 active+clean; 457 KiB data, 265 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[88323]: Upgrade: Updating rgw.foo.vm05.tiuqos (1/4) 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[88323]: Deploying daemon rgw.foo.vm05.tiuqos on vm05 2026-03-09T15:37:41.897 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:41 vm05 ceph-mon[88323]: osdmap e127: 8 total, 8 up, 8 in 2026-03-09T15:37:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:41 vm09 ceph-mon[77297]: OSD bench result of 31090.609457 IOPS is not within the threshold limit range of 50.000000 IOPS and 500.000000 IOPS for osd.7. IOPS capacity is unchanged at 315.000000 IOPS. The recommendation is to establish the osd's IOPS capacity using other benchmark tools (e.g. Fio) and then override osd_mclock_max_capacity_iops_[hdd|ssd]. 
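The OSD bench warning repeated by each monitor above means the measured 31090.6 IOPS fell outside the configured threshold range, so the default capacity of 315 IOPS was kept for osd.7. A hedged sketch of the follow-up the message itself recommends (the value 500 below is only a placeholder for a fio-derived measurement):

  # inspect the currently effective capacity for osd.7
  ceph config show osd.7 osd_mclock_max_capacity_iops_hdd
  # after benchmarking with an external tool such as fio, override the capacity
  ceph config set osd.7 osd_mclock_max_capacity_iops_hdd 500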
2026-03-09T15:37:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:41 vm09 ceph-mon[77297]: Upgrade: Setting container_image for all mds 2026-03-09T15:37:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:41 vm09 ceph-mon[77297]: pgmap v137: 161 pgs: 41 active+undersized, 28 active+undersized+degraded, 92 active+clean; 457 KiB data, 265 MiB used, 160 GiB / 160 GiB avail; 85/627 objects degraded (13.557%) 2026-03-09T15:37:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:41 vm09 ceph-mon[77297]: Upgrade: Updating rgw.foo.vm05.tiuqos (1/4) 2026-03-09T15:37:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:41 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:41 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm05.tiuqos", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:37:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:41 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:41 vm09 ceph-mon[77297]: Deploying daemon rgw.foo.vm05.tiuqos on vm05 2026-03-09T15:37:42.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:41 vm09 ceph-mon[77297]: osdmap e127: 8 total, 8 up, 8 in 2026-03-09T15:37:42.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:42 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:42.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:42 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:42.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:42 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:42.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:42 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:42.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:42 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:37:42.848 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:42 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:42.849 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:37:42 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:37:42] "GET /metrics HTTP/1.1" 200 38103 "" "Prometheus/2.51.0" 2026-03-09T15:37:42.849 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:42 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:42.849 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:42 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:42.849 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:42 vm05 
ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:42.849 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:42 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:42.849 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:42 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:37:42.849 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:42 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:43.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:42 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:43.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:42 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:43.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:42 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:43.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:42 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:43.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:42 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm05.grnlph", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:37:43.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:42 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:43.795 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:43.523Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:43.795 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:43.524Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:43.795 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:43.525Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no 
such host" 2026-03-09T15:37:43.795 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:43 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:43.525Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:44.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:43 vm09 ceph-mon[77297]: Upgrade: Updating rgw.smpl.vm05.grnlph (2/4) 2026-03-09T15:37:44.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:43 vm09 ceph-mon[77297]: pgmap v139: 161 pgs: 22 peering, 21 active+undersized, 20 active+undersized+degraded, 98 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 63/627 objects degraded (10.048%) 2026-03-09T15:37:44.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:43 vm09 ceph-mon[77297]: Deploying daemon rgw.smpl.vm05.grnlph on vm05 2026-03-09T15:37:44.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:43 vm09 ceph-mon[77297]: Health check update: Degraded data redundancy: 63/627 objects degraded (10.048%), 20 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:44.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:43 vm05 ceph-mon[86498]: Upgrade: Updating rgw.smpl.vm05.grnlph (2/4) 2026-03-09T15:37:44.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:43 vm05 ceph-mon[86498]: pgmap v139: 161 pgs: 22 peering, 21 active+undersized, 20 active+undersized+degraded, 98 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 63/627 objects degraded (10.048%) 2026-03-09T15:37:44.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:43 vm05 ceph-mon[86498]: Deploying daemon rgw.smpl.vm05.grnlph on vm05 2026-03-09T15:37:44.236 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:43 vm05 ceph-mon[86498]: Health check update: Degraded data redundancy: 63/627 objects degraded (10.048%), 20 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:44.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:43 vm05 ceph-mon[88323]: Upgrade: Updating rgw.smpl.vm05.grnlph (2/4) 2026-03-09T15:37:44.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:43 vm05 ceph-mon[88323]: pgmap v139: 161 pgs: 22 peering, 21 active+undersized, 20 active+undersized+degraded, 98 active+clean; 457 KiB data, 266 MiB used, 160 GiB / 160 GiB avail; 63/627 objects degraded (10.048%) 2026-03-09T15:37:44.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:43 vm05 ceph-mon[88323]: Deploying daemon rgw.smpl.vm05.grnlph on vm05 2026-03-09T15:37:44.236 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:43 vm05 ceph-mon[88323]: Health check update: Degraded data redundancy: 63/627 objects degraded (10.048%), 20 pgs degraded (PG_DEGRADED) 2026-03-09T15:37:44.402 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:37:44 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:37:44.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} 
was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.7\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.7\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.7\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.109\", device_class=\"hdd\", hostname=\"vm09\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.109\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:37:45.143 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:45 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:45.143 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:45 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:45.143 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:45 vm09 ceph-mon[77297]: Upgrade: Updating rgw.foo.vm09.aljafu (3/4) 2026-03-09T15:37:45.143 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:45 vm09 ceph-mon[77297]: pgmap v140: 161 pgs: 22 peering, 139 active+clean; 457 KiB data, 274 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 170 B/s wr, 103 op/s 2026-03-09T15:37:45.143 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:45 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:45.143 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:45 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:37:45.143 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:45 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:45.143 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:45 vm09 ceph-mon[77297]: Deploying daemon rgw.foo.vm09.aljafu on vm09 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[86498]: Upgrade: Updating rgw.foo.vm09.aljafu (3/4) 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[86498]: pgmap v140: 161 pgs: 22 
peering, 139 active+clean; 457 KiB data, 274 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 170 B/s wr, 103 op/s 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[86498]: Deploying daemon rgw.foo.vm09.aljafu on vm09 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[88323]: Upgrade: Updating rgw.foo.vm09.aljafu (3/4) 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[88323]: pgmap v140: 161 pgs: 22 peering, 139 active+clean; 457 KiB data, 274 MiB used, 160 GiB / 160 GiB avail; 67 KiB/s rd, 170 B/s wr, 103 op/s 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.aljafu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:45.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:45 vm05 ceph-mon[88323]: Deploying daemon rgw.foo.vm09.aljafu on vm09 2026-03-09T15:37:46.324 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:46 vm09 ceph-mon[77297]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 63/627 objects degraded (10.048%), 20 pgs degraded) 2026-03-09T15:37:46.324 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:46 vm09 ceph-mon[77297]: Cluster is now healthy 2026-03-09T15:37:46.324 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:46 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:37:46.324 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:46 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:46.324 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:46 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:46.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:46 vm05 ceph-mon[86498]: Health check 
cleared: PG_DEGRADED (was: Degraded data redundancy: 63/627 objects degraded (10.048%), 20 pgs degraded) 2026-03-09T15:37:46.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:46 vm05 ceph-mon[86498]: Cluster is now healthy 2026-03-09T15:37:46.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:46 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:37:46.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:46 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:46.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:46 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:46.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:46 vm05 ceph-mon[88323]: Health check cleared: PG_DEGRADED (was: Degraded data redundancy: 63/627 objects degraded (10.048%), 20 pgs degraded) 2026-03-09T15:37:46.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:46 vm05 ceph-mon[88323]: Cluster is now healthy 2026-03-09T15:37:46.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:46 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:37:46.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:46 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:46.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:46 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:46.971 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:37:46 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:37:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:37:47.678 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:47 vm09 ceph-mon[77297]: Upgrade: Updating rgw.smpl.vm09.mkjxeh (4/4) 2026-03-09T15:37:47.678 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:47 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' 
entity='mgr.y' 2026-03-09T15:37:47.678 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:47 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:37:47.678 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:47 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:47.678 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:47 vm09 ceph-mon[77297]: Deploying daemon rgw.smpl.vm09.mkjxeh on vm09 2026-03-09T15:37:47.678 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:47 vm09 ceph-mon[77297]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 282 MiB used, 160 GiB / 160 GiB avail; 101 KiB/s rd, 127 B/s wr, 156 op/s 2026-03-09T15:37:47.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:47 vm05 ceph-mon[86498]: Upgrade: Updating rgw.smpl.vm09.mkjxeh (4/4) 2026-03-09T15:37:47.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:47 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:47.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:47 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:37:47.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:47 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:47.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:47 vm05 ceph-mon[86498]: Deploying daemon rgw.smpl.vm09.mkjxeh on vm09 2026-03-09T15:37:47.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:47 vm05 ceph-mon[86498]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 282 MiB used, 160 GiB / 160 GiB avail; 101 KiB/s rd, 127 B/s wr, 156 op/s 2026-03-09T15:37:47.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:47 vm05 ceph-mon[88323]: Upgrade: Updating rgw.smpl.vm09.mkjxeh (4/4) 2026-03-09T15:37:47.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:47 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:47.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:47 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.rgw.smpl.vm09.mkjxeh", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]: dispatch 2026-03-09T15:37:47.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:47 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:47.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:47 vm05 ceph-mon[88323]: Deploying daemon rgw.smpl.vm09.mkjxeh on vm09 2026-03-09T15:37:47.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:47 vm05 ceph-mon[88323]: pgmap v141: 161 pgs: 161 active+clean; 457 KiB data, 282 MiB used, 160 GiB / 160 GiB avail; 101 KiB/s rd, 127 B/s wr, 156 op/s 2026-03-09T15:37:49.147 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:48 vm09 ceph-mon[77297]: from='mgr.25004 
192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:49.148 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:48 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:49.191 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:48 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:49.191 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:48 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:49.191 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:48 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:49.191 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:48 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[86498]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 286 MiB used, 160 GiB / 160 GiB avail; 142 KiB/s rd, 507 B/s wr, 218 op/s 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[88323]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 286 MiB used, 160 GiB / 160 GiB avail; 142 KiB/s rd, 507 B/s wr, 218 op/s 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.219 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.220 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[88323]: from='mgr.25004 
192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.220 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.220 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:49 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:49 vm09 ceph-mon[77297]: pgmap v142: 161 pgs: 161 active+clean; 457 KiB data, 286 MiB used, 160 GiB / 160 GiB avail; 142 KiB/s rd, 507 B/s wr, 218 op/s 2026-03-09T15:37:50.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:50.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:49 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 201 KiB/s rd, 409 B/s wr, 308 op/s 2026-03-09T15:37:51.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: Detected new or changed devices on vm09 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: Detected new or changed devices on vm05 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:37:51.987 
INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: Upgrade: Setting container_image for all rgw 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm05.tiuqos"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm05.tiuqos"}]': finished 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm09.aljafu"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm09.aljafu"}]': finished 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm05.grnlph"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm05.grnlph"}]': finished 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 
15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm09.mkjxeh"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm09.mkjxeh"}]': finished 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 201 KiB/s rd, 409 B/s wr, 308 op/s 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: Detected new or changed devices on vm09 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: Detected new or changed devices on vm05 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.987 
INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:37:51.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: Upgrade: Setting container_image for all rgw 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm05.tiuqos"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm05.tiuqos"}]': finished 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm09.aljafu"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm09.aljafu"}]': finished 
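As with the OSDs earlier, the upgrade removes the per-daemon container_image override for each rgw daemon once it has been redeployed on the new image. A minimal sketch for confirming that no stale per-daemon image pins remain afterwards (output may still include the cluster-wide image setting used by the upgrade):

  # list any remaining container_image overrides in the config database
  ceph config dump | grep container_image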
2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm05.grnlph"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm05.grnlph"}]': finished 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm09.mkjxeh"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm09.mkjxeh"}]': finished 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: dispatch 2026-03-09T15:37:51.988 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:51 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: pgmap v143: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 201 KiB/s rd, 409 B/s wr, 308 op/s 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: Detected new or changed devices on vm09 2026-03-09T15:37:52.062 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: Detected new or changed devices on vm05 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config dump", "format": "json"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: Upgrade: Setting container_image for all rgw 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm05.tiuqos"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 
cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm05.tiuqos"}]': finished 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm09.aljafu"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.foo.vm09.aljafu"}]': finished 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm05.grnlph"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm05.grnlph"}]': finished 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm09.mkjxeh"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd='[{"prefix": "config rm", "name": "container_image", "who": "client.rgw.smpl.vm09.mkjxeh"}]': finished 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: Upgrade: Setting container_image for all rbd-mirror 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:52.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:52.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:37:52.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: Upgrade: Setting container_image for all ceph-exporter 2026-03-09T15:37:52.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:52.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:52.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get-or-create", "entity": "client.iscsi.foo.vm05.rfsich", "caps": ["mon", "profile rbd, allow command \"osd blocklist\", allow command \"config-key get\" with \"key\" prefix \"iscsi/\"", "mgr", "allow command \"service status\"", "osd", "allow rwx"]}]: 
dispatch 2026-03-09T15:37:52.063 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:51 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:37:52.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:37:52 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:37:52] "GET /metrics HTTP/1.1" 200 38113 "" "Prometheus/2.51.0" 2026-03-09T15:37:52.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:52 vm05 ceph-mon[86498]: Upgrade: Updating iscsi.foo.vm05.rfsich 2026-03-09T15:37:52.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:52 vm05 ceph-mon[86498]: Deploying daemon iscsi.foo.vm05.rfsich on vm05 2026-03-09T15:37:52.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:52 vm05 ceph-mon[86498]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:52.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:52 vm05 ceph-mon[88323]: Upgrade: Updating iscsi.foo.vm05.rfsich 2026-03-09T15:37:52.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:52 vm05 ceph-mon[88323]: Deploying daemon iscsi.foo.vm05.rfsich on vm05 2026-03-09T15:37:52.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:52 vm05 ceph-mon[88323]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:53.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:52 vm09 ceph-mon[77297]: Upgrade: Updating iscsi.foo.vm05.rfsich 2026-03-09T15:37:53.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:52 vm09 ceph-mon[77297]: Deploying daemon iscsi.foo.vm05.rfsich on vm05 2026-03-09T15:37:53.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:52 vm09 ceph-mon[77297]: from='client.15156 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:37:53.736 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:53 vm05 ceph-mon[86498]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 221 KiB/s rd, 370 B/s wr, 340 op/s 2026-03-09T15:37:53.736 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:53 vm05 ceph-mon[88323]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 221 KiB/s rd, 370 B/s wr, 340 op/s 2026-03-09T15:37:53.736 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:53.524Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:53.736 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:53.524Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:53.736 
INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:53.525Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:53.736 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:37:53 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:37:53.525Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:37:54.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:53 vm09 ceph-mon[77297]: pgmap v144: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 221 KiB/s rd, 370 B/s wr, 340 op/s 2026-03-09T15:37:54.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:37:54 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:37:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:37:55.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:55 vm05 ceph-mon[86498]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 217 KiB/s rd, 341 B/s wr, 335 op/s 2026-03-09T15:37:55.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:55 vm05 ceph-mon[88323]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 217 KiB/s rd, 341 B/s wr, 335 op/s 2026-03-09T15:37:56.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:55 vm09 ceph-mon[77297]: pgmap v145: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 217 KiB/s rd, 341 B/s wr, 335 op/s 2026-03-09T15:37:56.949 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:56 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:56.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:56 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:56.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:56 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:37:57.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:37:56 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:37:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC 
Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side"
2026-03-09T15:37:57.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:57 vm05 ceph-mon[86498]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 183 KiB/s rd, 255 B/s wr, 283 op/s
2026-03-09T15:37:57.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:57 vm05 ceph-mon[88323]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 183 KiB/s rd, 255 B/s wr, 283 op/s
2026-03-09T15:38:58.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:57 vm09 ceph-mon[77297]: pgmap v146: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 183 KiB/s rd, 255 B/s wr, 283 op/s
2026-03-09T15:37:59.791 INFO:teuthology.orchestra.run.vm05.stdout:true
2026-03-09T15:37:59.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:37:59 vm05 ceph-mon[86498]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 150 KiB/s rd, 255 B/s wr, 230 op/s
2026-03-09T15:37:59.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:37:59 vm05 ceph-mon[88323]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 150 KiB/s rd, 255 B/s wr, 230 op/s
2026-03-09T15:38:00.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:37:59 vm09 ceph-mon[77297]: pgmap v147: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 150 KiB/s rd, 255 B/s wr, 230 op/s
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (4m) 10s ago 9m 24.9M - 0.25.0 c8568f914cd2 93224b6bb99a
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (3m) 10s ago 8m 48.6M - dad864ee21e9 6a58314a043e
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 running (3m) 10s ago 8m 50.7M - 3.5 e1d6a67b021e 3a5f40e66729
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443,9283,8765 running (3m) 10s ago 10m 490M - 19.2.3-678-ge911bdeb 654f31e6858e dd2d7e10f3aa
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:8443,9283,8765 running (3m) 10s ago 11m 561M - 19.2.3-678-ge911bdeb 654f31e6858e db0211ba824d
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (3m) 10s ago 11m 56.1M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3fa7c78f8952
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (3m) 10s ago 10m 45.6M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 60013cd0d65b
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (3m) 10s ago 10m 46.7M 2048M 19.2.3-678-ge911bdeb 654f31e6858e c4256ae4b3f9
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (4m) 10s ago 9m 9575k - 1.7.0 72c9c2088986 e730a028339f
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (4m) 10s ago 9m 9605k - 1.7.0 72c9c2088986 a360ac0679f4
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (3m) 10s ago 10m 77.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 343a65bb3f01
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (2m) 10s ago 10m 78.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 379185d73d4e
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (2m) 10s ago 10m 70.6M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 101cc91253b5
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (111s) 10s ago 10m 74.3M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 7796c013c7ba
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (89s) 10s ago 10m 52.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 7111150665fe
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (68s) 10s ago 9m 75.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 1512c99eec2b
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (46s) 10s ago 9m 71.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 07d4d024aa58
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (24s) 10s ago 9m 71.8M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 94ef91f7c845
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (3m) 10s ago 9m 46.4M - 2.51.0 1d3b7f56885b 737f11649a72
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (18s) 10s ago 8m 97.8M - 19.2.3-678-ge911bdeb 654f31e6858e bb7d3535719f
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (14s) 10s ago 8m 98.7M - 19.2.3-678-ge911bdeb 654f31e6858e f4ad1c473ec2
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (16s) 10s ago 8m 98.8M - 19.2.3-678-ge911bdeb 654f31e6858e 7cfe7cd6d225
2026-03-09T15:38:00.193 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (12s) 10s ago 8m 71.5M - 19.2.3-678-ge911bdeb 654f31e6858e fcf3feba6e80
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout:{
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: "mon": {
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: },
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: "mgr": {
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: },
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: "osd": {
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: },
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": {
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 4
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: },
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: "overall": {
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 17
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout: }
2026-03-09T15:38:00.426 INFO:teuthology.orchestra.run.vm05.stdout:}
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout:{
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df",
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true,
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout: "which": "Upgrading all daemon types on all hosts",
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout: "osd",
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout: "mgr",
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout: "mon",
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout: "rgw"
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout: ],
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "17/23 daemons upgraded",
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Currently upgrading iscsi daemons",
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout: "is_paused": false
2026-03-09T15:38:00.624 INFO:teuthology.orchestra.run.vm05.stdout:}
2026-03-09T15:38:00.864 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_OK
2026-03-09T15:38:00.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:00 vm05 ceph-mon[86498]: from='client.44345 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:38:00.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:00 vm05 ceph-mon[86498]: from='client.44374 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:38:00.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:00 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/2289495185' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch
2026-03-09T15:38:00.986 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:00 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch
2026-03-09T15:38:00.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:00 vm05 ceph-mon[88323]: from='client.44345 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:38:00.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:00 vm05 ceph-mon[88323]: from='client.44374 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:38:00.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:00 vm05 ceph-mon[88323]: from='client.?
192.168.123.105:0/2289495185' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:38:00.987 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:00 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:38:01.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:00 vm09 ceph-mon[77297]: from='client.44345 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:01.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:00 vm09 ceph-mon[77297]: from='client.44374 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:01.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:00 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/2289495185' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:38:01.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:00 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:38:01.912 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:01 vm05 ceph-mon[86498]: from='client.44380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:01.912 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:01 vm05 ceph-mon[86498]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 122 KiB/s rd, 0 B/s wr, 189 op/s 2026-03-09T15:38:01.912 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:01 vm05 ceph-mon[86498]: from='client.44392 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:01.912 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:01 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/3429825960' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:38:01.912 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:01 vm05 ceph-mon[88323]: from='client.44380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:01.912 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:01 vm05 ceph-mon[88323]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 122 KiB/s rd, 0 B/s wr, 189 op/s 2026-03-09T15:38:01.912 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:01 vm05 ceph-mon[88323]: from='client.44392 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:01.912 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:01 vm05 ceph-mon[88323]: from='client.? 
192.168.123.105:0/3429825960' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T15:38:02.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:01 vm09 ceph-mon[77297]: from='client.44380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:38:02.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:01 vm09 ceph-mon[77297]: pgmap v148: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 122 KiB/s rd, 0 B/s wr, 189 op/s
2026-03-09T15:38:02.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:01 vm09 ceph-mon[77297]: from='client.44392 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:38:02.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:01 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/3429825960' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch
2026-03-09T15:38:02.167 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:38:01 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:38:01.995+0000 7fef3d31a640 -1 log_channel(cephadm) log [ERR] : Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm05.rfsich on host vm05 failed.
2026-03-09T15:38:02.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:38:02 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:38:02] "GET /metrics HTTP/1.1" 200 38177 "" "Prometheus/2.51.0"
2026-03-09T15:38:03.011 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:03 vm09 ceph-mon[77297]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm05.rfsich on host vm05 failed.
2026-03-09T15:38:03.011 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:03 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y'
2026-03-09T15:38:03.011 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:03 vm09 ceph-mon[77297]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 0 B/s wr, 82 op/s
2026-03-09T15:38:03.011 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:03 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y'
2026-03-09T15:38:03.011 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:03 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y'
2026-03-09T15:38:03.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:03 vm05 ceph-mon[86498]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm05.rfsich on host vm05 failed.
2026-03-09T15:38:03.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:03 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:03.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:03 vm05 ceph-mon[86498]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 0 B/s wr, 82 op/s 2026-03-09T15:38:03.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:03 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:03.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:03 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:03.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:03 vm05 ceph-mon[88323]: Upgrade: Paused due to UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm05.rfsich on host vm05 failed. 2026-03-09T15:38:03.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:03 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:03.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:03 vm05 ceph-mon[88323]: pgmap v149: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 53 KiB/s rd, 0 B/s wr, 82 op/s 2026-03-09T15:38:03.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:03 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:03.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:03 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:03.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:38:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:38:03.526Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=2 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:38:03.986 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:38:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:38:03.526Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=5 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:38:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:04 vm09 ceph-mon[77297]: Health check failed: Upgrading daemon iscsi.foo.vm05.rfsich on host vm05 failed. 
(UPGRADE_REDEPLOY_DAEMON) 2026-03-09T15:38:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:04 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:04 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:04 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:38:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:04 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:38:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:04 vm09 ceph-mon[77297]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 25 op/s 2026-03-09T15:38:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:04 vm09 ceph-mon[77297]: Checking dashboard <-> RGW credentials 2026-03-09T15:38:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:04 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:04 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:38:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:04 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:38:04.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:04 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:04.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:38:04 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:38:04.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:38:04.472 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[86498]: Health check failed: Upgrading daemon iscsi.foo.vm05.rfsich on host vm05 failed. (UPGRADE_REDEPLOY_DAEMON) 2026-03-09T15:38:04.472 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:04.472 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:04.472 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:38:04.472 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:38:04.472 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[86498]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 25 op/s 2026-03-09T15:38:04.472 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[86498]: Checking dashboard <-> RGW credentials 2026-03-09T15:38:04.472 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:04.472 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:38:04.472 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:38:04.472 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:04.473 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[88323]: Health check failed: Upgrading daemon iscsi.foo.vm05.rfsich on host vm05 failed. 
(UPGRADE_REDEPLOY_DAEMON) 2026-03-09T15:38:04.473 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:04.473 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:04.473 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:38:04.473 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:38:04.473 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[88323]: pgmap v150: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 0 B/s wr, 25 op/s 2026-03-09T15:38:04.473 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[88323]: Checking dashboard <-> RGW credentials 2026-03-09T15:38:04.473 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:04.473 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:38:04.473 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:38:04.473 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:04 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:05.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:05 vm09 ceph-mon[77297]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-09T15:38:05.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:05 vm09 ceph-mon[77297]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T15:38:05.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:05 vm09 ceph-mon[77297]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-09T15:38:05.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:05 vm05 ceph-mon[86498]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-09T15:38:05.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:05 vm05 ceph-mon[86498]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T15:38:05.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:05 vm05 ceph-mon[86498]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-09T15:38:05.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:05 vm05 ceph-mon[88323]: pgmap v151: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 1 op/s 2026-03-09T15:38:05.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:05 vm05 ceph-mon[88323]: pgmap v152: 161 pgs: 161 active+clean; 457 KiB data, 290 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s 
rd, 1 op/s 2026-03-09T15:38:05.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:05 vm05 ceph-mon[88323]: Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON) 2026-03-09T15:38:07.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:07 vm09 ceph-mon[77297]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.3 KiB/s rd, 3 op/s 2026-03-09T15:38:07.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:38:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:38:06.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:38:07.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:07 vm05 ceph-mon[86498]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.3 KiB/s rd, 3 op/s 2026-03-09T15:38:07.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:07 vm05 ceph-mon[88323]: pgmap v153: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.3 KiB/s rd, 3 op/s 2026-03-09T15:38:09.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:09 vm09 ceph-mon[77297]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.7 KiB/s rd, 4 op/s 2026-03-09T15:38:09.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:09 vm05 ceph-mon[86498]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.7 KiB/s rd, 4 op/s 2026-03-09T15:38:09.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:09 vm05 ceph-mon[88323]: pgmap v154: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.7 KiB/s rd, 4 op/s 2026-03-09T15:38:11.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:11 vm09 ceph-mon[77297]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.2 KiB/s rd, 3 op/s 2026-03-09T15:38:11.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:11 vm05 ceph-mon[86498]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.2 KiB/s rd, 3 op/s 2026-03-09T15:38:11.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 
15:38:11 vm05 ceph-mon[88323]: pgmap v155: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 3.2 KiB/s rd, 3 op/s 2026-03-09T15:38:12.323 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:12 vm05 ceph-mon[86498]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 2.8 KiB/s rd, 3 op/s 2026-03-09T15:38:12.324 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:12 vm05 ceph-mon[88323]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 2.8 KiB/s rd, 3 op/s 2026-03-09T15:38:12.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:12 vm09 ceph-mon[77297]: pgmap v156: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 2.8 KiB/s rd, 3 op/s 2026-03-09T15:38:12.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:38:12 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:38:12] "GET /metrics HTTP/1.1" 200 38177 "" "Prometheus/2.51.0" 2026-03-09T15:38:13.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:13 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/1133479067' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T15:38:13.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:13 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/742120879' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2854630909"}]: dispatch 2026-03-09T15:38:13.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:13 vm05 ceph-mon[86498]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2854630909"}]: dispatch 2026-03-09T15:38:13.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:13 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/1133479067' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T15:38:13.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:13 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/742120879' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2854630909"}]: dispatch 2026-03-09T15:38:13.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:13 vm05 ceph-mon[88323]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2854630909"}]: dispatch 2026-03-09T15:38:13.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:13 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/1133479067' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist ls"}]: dispatch 2026-03-09T15:38:13.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:13 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/742120879' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2854630909"}]: dispatch 2026-03-09T15:38:13.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:13 vm09 ceph-mon[77297]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2854630909"}]: dispatch 2026-03-09T15:38:14.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:14 vm05 ceph-mon[86498]: from='client.? 
' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2854630909"}]': finished 2026-03-09T15:38:14.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:14 vm05 ceph-mon[86498]: osdmap e128: 8 total, 8 up, 8 in 2026-03-09T15:38:14.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:14 vm05 ceph-mon[86498]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 1 op/s 2026-03-09T15:38:14.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:14 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/526784579' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/2654619260"}]: dispatch 2026-03-09T15:38:14.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:14 vm05 ceph-mon[86498]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/2654619260"}]: dispatch 2026-03-09T15:38:14.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:14 vm05 ceph-mon[88323]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2854630909"}]': finished 2026-03-09T15:38:14.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:14 vm05 ceph-mon[88323]: osdmap e128: 8 total, 8 up, 8 in 2026-03-09T15:38:14.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:14 vm05 ceph-mon[88323]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 1 op/s 2026-03-09T15:38:14.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:14 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/526784579' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/2654619260"}]: dispatch 2026-03-09T15:38:14.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:14 vm05 ceph-mon[88323]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/2654619260"}]: dispatch 2026-03-09T15:38:14.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:38:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:38:14.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:38:14.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:14 vm09 ceph-mon[77297]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2854630909"}]': finished 2026-03-09T15:38:14.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:14 vm09 ceph-mon[77297]: osdmap e128: 8 total, 8 up, 8 in 2026-03-09T15:38:14.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:14 vm09 ceph-mon[77297]: pgmap v158: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1.8 KiB/s rd, 1 op/s 2026-03-09T15:38:14.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:14 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/526784579' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/2654619260"}]: dispatch 2026-03-09T15:38:14.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:14 vm09 ceph-mon[77297]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/2654619260"}]: dispatch 2026-03-09T15:38:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:15 vm05 ceph-mon[86498]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/2654619260"}]': finished 2026-03-09T15:38:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:15 vm05 ceph-mon[86498]: osdmap e129: 8 total, 8 up, 8 in 2026-03-09T15:38:15.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:15 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/1753284338' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1320710213"}]: dispatch 2026-03-09T15:38:15.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:15 vm05 ceph-mon[88323]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/2654619260"}]': finished 2026-03-09T15:38:15.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:15 vm05 ceph-mon[88323]: osdmap e129: 8 total, 8 up, 8 in 2026-03-09T15:38:15.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:15 vm05 ceph-mon[88323]: from='client.? 
192.168.123.105:0/1753284338' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1320710213"}]: dispatch 2026-03-09T15:38:15.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:15 vm09 ceph-mon[77297]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6801/2654619260"}]': finished 2026-03-09T15:38:15.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:15 vm09 ceph-mon[77297]: osdmap e129: 8 total, 8 up, 8 in 2026-03-09T15:38:15.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:15 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/1753284338' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1320710213"}]: dispatch 2026-03-09T15:38:16.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:16 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/1753284338' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1320710213"}]': finished 2026-03-09T15:38:16.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:16 vm05 ceph-mon[86498]: osdmap e130: 8 total, 8 up, 8 in 2026-03-09T15:38:16.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:16 vm05 ceph-mon[86498]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:38:16.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:16 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/1213685306' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3538544138"}]: dispatch 2026-03-09T15:38:16.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:16 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:16.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:16 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:38:16.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:16 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/1753284338' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1320710213"}]': finished 2026-03-09T15:38:16.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:16 vm05 ceph-mon[88323]: osdmap e130: 8 total, 8 up, 8 in 2026-03-09T15:38:16.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:16 vm05 ceph-mon[88323]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:38:16.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:16 vm05 ceph-mon[88323]: from='client.? 
192.168.123.105:0/1213685306' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3538544138"}]: dispatch 2026-03-09T15:38:16.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:16 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:16.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:16 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:38:16.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:16 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/1753284338' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/1320710213"}]': finished 2026-03-09T15:38:16.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:16 vm09 ceph-mon[77297]: osdmap e130: 8 total, 8 up, 8 in 2026-03-09T15:38:16.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:16 vm09 ceph-mon[77297]: pgmap v161: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail 2026-03-09T15:38:16.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:16 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/1213685306' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3538544138"}]: dispatch 2026-03-09T15:38:16.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:16 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:38:16.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:16 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:38:17.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:38:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:38:16.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:38:17.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:17 vm09 ceph-mon[77297]: 
from='client.? 192.168.123.105:0/1213685306' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3538544138"}]': finished 2026-03-09T15:38:17.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:17 vm09 ceph-mon[77297]: osdmap e131: 8 total, 8 up, 8 in 2026-03-09T15:38:17.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:17 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/2380723791' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2433681664"}]: dispatch 2026-03-09T15:38:17.312 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:17 vm09 ceph-mon[77297]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2433681664"}]: dispatch 2026-03-09T15:38:17.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:17 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/1213685306' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3538544138"}]': finished 2026-03-09T15:38:17.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:17 vm05 ceph-mon[86498]: osdmap e131: 8 total, 8 up, 8 in 2026-03-09T15:38:17.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:17 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/2380723791' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2433681664"}]: dispatch 2026-03-09T15:38:17.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:17 vm05 ceph-mon[86498]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2433681664"}]: dispatch 2026-03-09T15:38:17.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:17 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/1213685306' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/3538544138"}]': finished 2026-03-09T15:38:17.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:17 vm05 ceph-mon[88323]: osdmap e131: 8 total, 8 up, 8 in 2026-03-09T15:38:17.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:17 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/2380723791' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2433681664"}]: dispatch 2026-03-09T15:38:17.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:17 vm05 ceph-mon[88323]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2433681664"}]: dispatch 2026-03-09T15:38:18.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:18 vm05 ceph-mon[86498]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2433681664"}]': finished 2026-03-09T15:38:18.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:18 vm05 ceph-mon[86498]: osdmap e132: 8 total, 8 up, 8 in 2026-03-09T15:38:18.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:18 vm05 ceph-mon[86498]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1 op/s 2026-03-09T15:38:18.486 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:18 vm05 ceph-mon[86498]: from='client.? 
192.168.123.105:0/3414806162' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/2654619260"}]: dispatch 2026-03-09T15:38:18.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:18 vm05 ceph-mon[88323]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2433681664"}]': finished 2026-03-09T15:38:18.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:18 vm05 ceph-mon[88323]: osdmap e132: 8 total, 8 up, 8 in 2026-03-09T15:38:18.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:18 vm05 ceph-mon[88323]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1 op/s 2026-03-09T15:38:18.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:18 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/3414806162' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/2654619260"}]: dispatch 2026-03-09T15:38:18.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:18 vm09 ceph-mon[77297]: from='client.? ' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:0/2433681664"}]': finished 2026-03-09T15:38:18.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:18 vm09 ceph-mon[77297]: osdmap e132: 8 total, 8 up, 8 in 2026-03-09T15:38:18.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:18 vm09 ceph-mon[77297]: pgmap v164: 161 pgs: 161 active+clean; 457 KiB data, 294 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 1 op/s 2026-03-09T15:38:18.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:18 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/3414806162' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/2654619260"}]: dispatch 2026-03-09T15:38:19.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:19 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/3414806162' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/2654619260"}]': finished 2026-03-09T15:38:19.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:19 vm05 ceph-mon[86498]: osdmap e133: 8 total, 8 up, 8 in 2026-03-09T15:38:19.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:19 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/3414806162' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/2654619260"}]': finished 2026-03-09T15:38:19.486 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:19 vm05 ceph-mon[88323]: osdmap e133: 8 total, 8 up, 8 in 2026-03-09T15:38:19.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:19 vm09 ceph-mon[77297]: from='client.? 
192.168.123.105:0/3414806162' entity='client.iscsi.foo.vm05.rfsich' cmd='[{"prefix": "osd blocklist", "blocklistop": "rm", "addr": "192.168.123.105:6800/2654619260"}]': finished 2026-03-09T15:38:19.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:19 vm09 ceph-mon[77297]: osdmap e133: 8 total, 8 up, 8 in 2026-03-09T15:38:20.485 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:20 vm05 ceph-mon[86498]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 998 B/s rd, 1 op/s 2026-03-09T15:38:20.485 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:20 vm05 ceph-mon[88323]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 998 B/s rd, 1 op/s 2026-03-09T15:38:20.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:20 vm09 ceph-mon[77297]: pgmap v166: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 998 B/s rd, 1 op/s 2026-03-09T15:38:22.589 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:22 vm05 ceph-mon[86498]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T15:38:22.589 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:22 vm05 ceph-mon[88323]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T15:38:22.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:22 vm09 ceph-mon[77297]: pgmap v167: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.5 KiB/s rd, 1 op/s 2026-03-09T15:38:22.986 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:38:22 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:38:22] "GET /metrics HTTP/1.1" 200 38327 "" "Prometheus/2.51.0" 2026-03-09T15:38:23.579 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:23 vm05 ceph-mon[86498]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:38:23.579 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:23 vm05 ceph-mon[88323]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:38:23.812 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:23 vm09 ceph-mon[77297]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:38:24.562 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:38:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:38:24.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:38:24.562 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:24 vm09 ceph-mon[77297]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 723 B/s rd, 0 op/s 2026-03-09T15:38:24.735 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:24 vm05 ceph-mon[86498]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 723 B/s rd, 0 op/s 2026-03-09T15:38:24.735 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:24 vm05 ceph-mon[88323]: pgmap v168: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 723 B/s rd, 0 op/s 2026-03-09T15:38:26.949 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:26 vm09 ceph-mon[77297]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 634 B/s rd, 0 op/s 2026-03-09T15:38:26.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:26 vm05 ceph-mon[86498]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 634 B/s rd, 0 op/s 2026-03-09T15:38:26.986 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:26 vm05 ceph-mon[88323]: pgmap v169: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 634 B/s rd, 0 op/s 2026-03-09T15:38:27.312 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:38:26 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:38:26.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", 
nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:38:28.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:28 vm05 ceph-mon[86498]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:38:28.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:28 vm05 ceph-mon[88323]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:38:29.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:28 vm09 ceph-mon[77297]: pgmap v170: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:38:30.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:30 vm05 ceph-mon[86498]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 925 B/s rd, 0 op/s 2026-03-09T15:38:30.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:38:30.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:30 vm05 ceph-mon[88323]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 925 B/s rd, 0 op/s 2026-03-09T15:38:30.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:30 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:38:31.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:30 vm09 ceph-mon[77297]: pgmap v171: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 925 B/s rd, 0 op/s 2026-03-09T15:38:31.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:30 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:38:31.086 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:38:31.289 INFO:teuthology.orchestra.run.vm05.stdout:"Error: UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm05.rfsich on host vm05 failed." 
2026-03-09T15:38:31.351 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps'
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (4m) 28s ago 9m - - 0.25.0 c8568f914cd2 93224b6bb99a
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (4m) 28s ago 9m 48.0M - dad864ee21e9 6a58314a043e
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 unknown 28s ago 9m - -
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443,9283,8765 running (4m) 28s ago 11m 490M - 19.2.3-678-ge911bdeb 654f31e6858e dd2d7e10f3aa
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:8443,9283,8765 running (4m) 28s ago 12m - - 19.2.3-678-ge911bdeb 654f31e6858e db0211ba824d
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (4m) 28s ago 12m - 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3fa7c78f8952
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (3m) 28s ago 11m 46.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 60013cd0d65b
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (3m) 28s ago 11m - 2048M 19.2.3-678-ge911bdeb 654f31e6858e c4256ae4b3f9
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (4m) 28s ago 9m - - 1.7.0 72c9c2088986 e730a028339f
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (4m) 28s ago 9m 9613k - 1.7.0 72c9c2088986 a360ac0679f4
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (3m) 28s ago 11m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 343a65bb3f01
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (3m) 28s ago 10m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 379185d73d4e
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (2m) 28s ago 10m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 101cc91253b5
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (2m) 28s ago 10m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 7796c013c7ba
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (2m) 28s ago 10m 53.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 7111150665fe
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 running (99s) 28s ago 10m 75.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 1512c99eec2b
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (78s) 28s ago 10m 71.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 07d4d024aa58
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (55s) 28s ago 10m 72.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 94ef91f7c845
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (4m) 28s ago 9m 46.4M - 2.51.0 1d3b7f56885b 737f11649a72
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (49s) 28s ago 9m - - 19.2.3-678-ge911bdeb 654f31e6858e bb7d3535719f
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (45s) 28s ago 9m 98.9M - 19.2.3-678-ge911bdeb 654f31e6858e f4ad1c473ec2
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (47s) 28s ago 9m - - 19.2.3-678-ge911bdeb 654f31e6858e 7cfe7cd6d225
2026-03-09T15:38:31.880 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (44s) 28s ago 9m 100M - 19.2.3-678-ge911bdeb 654f31e6858e fcf3feba6e80
2026-03-09T15:38:31.930 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions'
2026-03-09T15:38:32.482 INFO:teuthology.orchestra.run.vm05.stdout:{
2026-03-09T15:38:32.482 INFO:teuthology.orchestra.run.vm05.stdout: "mon": {
2026-03-09T15:38:32.482 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3
2026-03-09T15:38:32.482 INFO:teuthology.orchestra.run.vm05.stdout: },
2026-03-09T15:38:32.482 INFO:teuthology.orchestra.run.vm05.stdout: "mgr": {
2026-03-09T15:38:32.482 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2
2026-03-09T15:38:32.482 INFO:teuthology.orchestra.run.vm05.stdout: },
2026-03-09T15:38:32.482 INFO:teuthology.orchestra.run.vm05.stdout: "osd": {
2026-03-09T15:38:32.482 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8
2026-03-09T15:38:32.483 INFO:teuthology.orchestra.run.vm05.stdout: },
2026-03-09T15:38:32.483 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": {
2026-03-09T15:38:32.483 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 4
2026-03-09T15:38:32.483 INFO:teuthology.orchestra.run.vm05.stdout: },
2026-03-09T15:38:32.483 INFO:teuthology.orchestra.run.vm05.stdout: "overall": {
2026-03-09T15:38:32.483 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 17
2026-03-09T15:38:32.483 INFO:teuthology.orchestra.run.vm05.stdout: }
2026-03-09T15:38:32.483 INFO:teuthology.orchestra.run.vm05.stdout:}
2026-03-09T15:38:32.533 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'echo "wait for servicemap items w/ changing names to refresh"'
2026-03-09T15:38:32.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:38:32 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:38:32] "GET /metrics HTTP/1.1" 200 38327 "" "Prometheus/2.51.0"
2026-03-09T15:38:32.737 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:32 vm05 ceph-mon[86498]: from='client.34494 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch
2026-03-09T15:38:32.737 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:32 vm05 ceph-mon[86498]: pgmap v172:
161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:32.737 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:32 vm05 ceph-mon[86498]: from='client.44479 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:32.737 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:32 vm05 ceph-mon[86498]: from='client.44444 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:32.737 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:32 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/3150978507' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:38:32.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:32 vm05 ceph-mon[88323]: from='client.34494 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:32.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:32 vm05 ceph-mon[88323]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:32.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:32 vm05 ceph-mon[88323]: from='client.44479 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:32.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:32 vm05 ceph-mon[88323]: from='client.44444 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:32.737 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:32 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/3150978507' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:38:32.835 INFO:teuthology.orchestra.run.vm05.stdout:wait for servicemap items w/ changing names to refresh 2026-03-09T15:38:32.866 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'sleep 60' 2026-03-09T15:38:33.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:32 vm09 ceph-mon[77297]: from='client.34494 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:33.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:32 vm09 ceph-mon[77297]: pgmap v172: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:33.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:32 vm09 ceph-mon[77297]: from='client.44479 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:33.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:32 vm09 ceph-mon[77297]: from='client.44444 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:38:33.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:32 vm09 ceph-mon[77297]: from='client.? 
192.168.123.105:0/3150978507' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:38:33.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:33 vm05 ceph-mon[86498]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:38:33.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:33 vm05 ceph-mon[88323]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:38:34.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:33 vm09 ceph-mon[77297]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:38:34.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:38:34 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:38:34.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:38:34.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:34 vm05 ceph-mon[86498]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:34.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:34 vm05 ceph-mon[88323]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:35.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:34 vm09 ceph-mon[77297]: pgmap v173: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:36.949 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:36 vm09 ceph-mon[77297]: 
pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:36.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:36 vm05 ceph-mon[86498]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:37.007 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:36 vm05 ceph-mon[88323]: pgmap v174: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:37.311 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:38:36 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:38:36.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:38:38.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:38 vm05 ceph-mon[86498]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:38.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:38 vm05 ceph-mon[88323]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:39.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:38 vm09 ceph-mon[77297]: pgmap v175: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:40.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:40 vm05 ceph-mon[86498]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:40.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:40 vm05 ceph-mon[88323]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:41.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:40 vm09 ceph-mon[77297]: pgmap v176: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:42.674 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:38:42 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 
::ffff:192.168.123.109 - - [09/Mar/2026:15:38:42] "GET /metrics HTTP/1.1" 200 38331 "" "Prometheus/2.51.0" 2026-03-09T15:38:42.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:42 vm05 ceph-mon[86498]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:42.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:42 vm05 ceph-mon[88323]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:43.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:42 vm09 ceph-mon[77297]: pgmap v177: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:43.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:43 vm05 ceph-mon[86498]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:38:43.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:43 vm05 ceph-mon[88323]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:38:44.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:43 vm09 ceph-mon[77297]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:38:44.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:38:44 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:38:44.148Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. 
Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:38:44.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:44 vm05 ceph-mon[86498]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:44.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:44 vm05 ceph-mon[88323]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:45.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:44 vm09 ceph-mon[77297]: pgmap v178: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:45.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:45 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:38:45.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:45 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:38:46.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:45 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:38:46.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:46 vm05 ceph-mon[86498]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:46.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:46 vm05 ceph-mon[88323]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:47.061 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:38:46 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:38:46.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n 
description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:38:47.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:46 vm09 ceph-mon[77297]: pgmap v179: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:48.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:48 vm05 ceph-mon[86498]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:48.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:48 vm05 ceph-mon[88323]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:49.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:48 vm09 ceph-mon[77297]: pgmap v180: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:50.985 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:50 vm05 ceph-mon[86498]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:50.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:50 vm05 ceph-mon[88323]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:51.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:50 vm09 ceph-mon[77297]: pgmap v181: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:52.720 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:38:52 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:38:52] "GET /metrics HTTP/1.1" 200 38331 "" "Prometheus/2.51.0" 2026-03-09T15:38:52.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:52 vm05 ceph-mon[86498]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:52.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:52 vm05 ceph-mon[88323]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:53.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:52 vm09 ceph-mon[77297]: pgmap v182: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:53.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:53 vm05 ceph-mon[86498]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 
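The recurring prometheus.a "Evaluating rule failed ... many-to-many matching not allowed" warnings (CephOSDFlapping and CephNodeDiskspaceWarning above) look like a side effect of the upgrade window rather than a new cluster fault: ceph_osd_metadata is present twice for the same daemon (one series tagged with the upgraded 19.2.3 build at instance="ceph_cluster", one still tagged 17.2.0 at instance="192.168.123.109:9283"), and node_uname_info for vm09 is present both with and without the cluster label, so the one-to-many group_left joins in the alert rules refuse to evaluate. A sketch of how one could confirm and work around this against the prometheus.a endpoint listed above (vm09, port 9095); the hostname and the max-by aggregation used to collapse the duplicates are illustrative assumptions, not part of the shipped rules:

    # List the duplicate metadata series behind the CephOSDFlapping failure (assumes vm09:9095 is reachable by name).
    curl -s 'http://vm09:9095/api/v1/series' \
         --data-urlencode 'match[]=ceph_osd_metadata{ceph_daemon="osd.0"}' | jq '.data'

    # Re-run the flapping expression with the right-hand side collapsed to one series per daemon,
    # so the join evaluates even while both exporters are being scraped.
    curl -s 'http://vm09:9095/api/v1/query' \
         --data-urlencode 'query=(rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) max by (ceph_daemon, hostname) (ceph_osd_metadata)) * 60 > 1' | jq '.data.result'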
2026-03-09T15:38:53.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:53 vm05 ceph-mon[88323]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:38:54.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:53 vm09 ceph-mon[77297]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:38:54.561 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:38:54 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:38:54.147Z caller=group.go:483 level=warn name=CephOSDFlapping index=13 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=osd msg="Evaluating rule failed" rule="alert: CephOSDFlapping\nexpr: (rate(ceph_osd_up[5m]) * on (ceph_daemon) group_left (hostname) ceph_osd_metadata)\n * 60 > 1\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.4.4\n severity: warning\n type: ceph_default\nannotations:\n description: OSD {{ $labels.ceph_daemon }} on {{ $labels.hostname }} was marked\n down and back up {{ $value | humanize }} times once a minute for 5 minutes. This\n may indicate a network issue (latency, packet loss, MTU mismatch) on the cluster\n network, or the public network if no cluster network is deployed. Check the network\n stats on the listed host(s).\n documentation: https://docs.ceph.com/en/latest/rados/troubleshooting/troubleshooting-osd#flapping-osds\n summary: Network issues are causing OSDs to flap (mark each other down)\n" err="found duplicate series for the match group {ceph_daemon=\"osd.0\"} on the right hand-side of the operation: [{__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"ceph_cluster\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}, {__name__=\"ceph_osd_metadata\", ceph_daemon=\"osd.0\", ceph_version=\"ceph version 17.2.0 (43e2e60a7559d3f46c9d53f1ca875fd499a1e35e) quincy (stable)\", cluster_addr=\"192.168.123.105\", device_class=\"hdd\", hostname=\"vm05\", instance=\"192.168.123.109:9283\", job=\"ceph\", objectstore=\"bluestore\", public_addr=\"192.168.123.105\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:38:54.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:54 vm05 ceph-mon[86498]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:54.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:54 vm05 ceph-mon[88323]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:55.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:54 vm09 ceph-mon[77297]: pgmap v183: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:57.061 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:38:56 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:38:56.948Z caller=group.go:483 level=warn name=CephNodeDiskspaceWarning index=4 component="rule manager" file=/etc/prometheus/alerting/ceph_alerts.yml group=nodes msg="Evaluating rule failed" rule="alert: CephNodeDiskspaceWarning\nexpr: 
predict_linear(node_filesystem_free_bytes{device=~\"/.*\"}[2d], 3600 * 24 * 5)\n * on (instance) group_left (nodename) node_uname_info < 0\nlabels:\n oid: 1.3.6.1.4.1.50495.1.2.1.8.4\n severity: warning\n type: ceph_default\nannotations:\n description: Mountpoint {{ $labels.mountpoint }} on {{ $labels.nodename }} will\n be full in less than 5 days based on the 48 hour trailing fill rate.\n summary: Host filesystem free space is getting low\n" err="found duplicate series for the match group {instance=\"vm09\"} on the right hand-side of the operation: [{__name__=\"node_uname_info\", cluster=\"452f6a00-1bcc-11f1-a1ee-7f1a2af01dea\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}, {__name__=\"node_uname_info\", domainname=\"(none)\", instance=\"vm09\", job=\"node\", machine=\"x86_64\", nodename=\"vm09\", release=\"5.14.0-686.el9.x86_64\", sysname=\"Linux\", version=\"#1 SMP PREEMPT_DYNAMIC Thu Feb 19 10:49:27 UTC 2026\"}];many-to-many matching not allowed: matching labels must be unique on one side" 2026-03-09T15:38:57.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:56 vm09 ceph-mon[77297]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:57.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:56 vm05 ceph-mon[86498]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:57.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:56 vm05 ceph-mon[88323]: pgmap v184: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:38:59.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:38:58 vm09 ceph-mon[77297]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:59.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:38:58 vm05 ceph-mon[86498]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:38:59.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:38:58 vm05 ceph-mon[88323]: pgmap v185: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:01.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:00 vm09 ceph-mon[77297]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:01.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:00 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:39:01.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:00 vm05 ceph-mon[86498]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:01.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:00 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:39:01.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:00 vm05 ceph-mon[88323]: pgmap v186: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 
2026-03-09T15:39:01.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:00 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:39:02.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:02 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:39:02] "GET /metrics HTTP/1.1" 200 38330 "" "Prometheus/2.51.0" 2026-03-09T15:39:03.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:02 vm09 ceph-mon[77297]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:03.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:02 vm05 ceph-mon[86498]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:03.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:02 vm05 ceph-mon[88323]: pgmap v187: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:04.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:03 vm09 ceph-mon[77297]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:04.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:03 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:39:04.061 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:03 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:39:04.062 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:03 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:39:04.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:03 vm05 ceph-mon[86498]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:04.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:03 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:39:04.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:03 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:39:04.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:03 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:39:04.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:03 vm05 ceph-mon[88323]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:04.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:03 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "config generate-minimal-conf"}]: dispatch 2026-03-09T15:39:04.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:03 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "auth get", "entity": "client.admin"}]: dispatch 2026-03-09T15:39:04.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:03 vm05 
ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' 2026-03-09T15:39:04.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:04 vm05 ceph-mon[86498]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:04.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:04 vm05 ceph-mon[86498]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 991 B/s rd, 0 op/s 2026-03-09T15:39:04.984 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:04 vm05 ceph-mon[86498]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:04 vm05 ceph-mon[88323]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:04 vm05 ceph-mon[88323]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 991 B/s rd, 0 op/s 2026-03-09T15:39:04.985 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:04 vm05 ceph-mon[88323]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:05.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:04 vm09 ceph-mon[77297]: pgmap v188: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:05.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:04 vm09 ceph-mon[77297]: pgmap v189: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 991 B/s rd, 0 op/s 2026-03-09T15:39:05.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:04 vm09 ceph-mon[77297]: pgmap v190: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:07.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:06 vm05 ceph-mon[86498]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:07.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:06 vm05 ceph-mon[88323]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:07.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:06 vm09 ceph-mon[77297]: pgmap v191: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:09.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:08 vm05 ceph-mon[86498]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:09.235 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:08.845Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:09.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:08 vm05 ceph-mon[88323]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 
op/s 2026-03-09T15:39:09.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:08 vm09 ceph-mon[77297]: pgmap v192: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:11.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:10 vm05 ceph-mon[86498]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 614 B/s rd, 0 op/s 2026-03-09T15:39:11.234 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:10 vm05 ceph-mon[88323]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 614 B/s rd, 0 op/s 2026-03-09T15:39:11.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:10 vm09 ceph-mon[77297]: pgmap v193: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 614 B/s rd, 0 op/s 2026-03-09T15:39:12.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:12 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:39:12] "GET /metrics HTTP/1.1" 200 38330 "" "Prometheus/2.51.0" 2026-03-09T15:39:13.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:12 vm05 ceph-mon[86498]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:13.244 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:12 vm05 ceph-mon[88323]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:13.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:12 vm09 ceph-mon[77297]: pgmap v194: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:14.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:13 vm05 ceph-mon[86498]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:14.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:13 vm05 ceph-mon[88323]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:14.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:13 vm09 ceph-mon[77297]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:15.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:14 vm05 ceph-mon[86498]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:39:15.234 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:14 vm05 ceph-mon[88323]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:39:15.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:14 vm09 ceph-mon[77297]: pgmap v195: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 op/s 2026-03-09T15:39:16.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:15 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:39:16.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:15 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:39:16.311 
INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:15 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:39:17.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:16 vm05 ceph-mon[86498]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:17.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:16 vm05 ceph-mon[88323]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:17.235 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:16 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:16.951Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:17.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:16 vm09 ceph-mon[77297]: pgmap v196: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:19.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:18 vm05 ceph-mon[86498]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:19.235 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:18 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:18.844Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:19.235 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:18 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:18.845Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:19.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:18 vm05 ceph-mon[88323]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:19.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:18 vm09 ceph-mon[77297]: pgmap v197: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:21.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:20 vm05 ceph-mon[86498]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:21.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:20 vm05 ceph-mon[88323]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:21.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:20 vm09 ceph-mon[77297]: pgmap v198: 161 pgs: 161 active+clean; 457 KiB 
data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:22.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:22 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:39:22] "GET /metrics HTTP/1.1" 200 38328 "" "Prometheus/2.51.0" 2026-03-09T15:39:23.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:22 vm05 ceph-mon[86498]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:23.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:22 vm05 ceph-mon[88323]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:23.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:22 vm09 ceph-mon[77297]: pgmap v199: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:24.212 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:23 vm09 ceph-mon[77297]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:24.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:23 vm05 ceph-mon[86498]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:24.234 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:23 vm05 ceph-mon[88323]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:25.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:24 vm05 ceph-mon[86498]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:25.234 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:24 vm05 ceph-mon[88323]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:25.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:24 vm09 ceph-mon[77297]: pgmap v200: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:27.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:26 vm05 ceph-mon[86498]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:27.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:26 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:26.950Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:27.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:26 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:26.951Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:27.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 
15:39:26 vm05 ceph-mon[88323]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:27.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:26 vm09 ceph-mon[77297]: pgmap v201: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:29.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:28 vm05 ceph-mon[86498]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:29.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:28.845Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:29.235 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:28.845Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:29.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:28 vm05 ceph-mon[88323]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:29.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:28 vm09 ceph-mon[77297]: pgmap v202: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:31.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:30 vm05 ceph-mon[86498]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:31.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:30 vm05 ceph-mon[86498]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:39:31.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:30 vm05 ceph-mon[88323]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:31.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:30 vm05 ceph-mon[88323]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:39:31.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:30 vm09 ceph-mon[77297]: pgmap v203: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:31.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:30 vm09 ceph-mon[77297]: from='mgr.25004 192.168.123.105:0/1652075334' entity='mgr.y' cmd=[{"prefix": "osd blocklist ls", "format": "json"}]: dispatch 2026-03-09T15:39:32.734 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:32 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:39:32] "GET /metrics HTTP/1.1" 200 38330 "" 
"Prometheus/2.51.0" 2026-03-09T15:39:33.189 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ps' 2026-03-09T15:39:33.211 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:32 vm05 ceph-mon[88323]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:33.211 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:32 vm05 ceph-mon[86498]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:33.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:32 vm09 ceph-mon[77297]: pgmap v204: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:33.696 INFO:teuthology.orchestra.run.vm05.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-03-09T15:39:33.696 INFO:teuthology.orchestra.run.vm05.stdout:alertmanager.a vm05 *:9093,9094 running (5m) 90s ago 10m - - 0.25.0 c8568f914cd2 93224b6bb99a 2026-03-09T15:39:33.696 INFO:teuthology.orchestra.run.vm05.stdout:grafana.a vm09 *:3000 running (5m) 90s ago 10m 48.0M - dad864ee21e9 6a58314a043e 2026-03-09T15:39:33.696 INFO:teuthology.orchestra.run.vm05.stdout:iscsi.foo.vm05.rfsich vm05 unknown 90s ago 10m - - 2026-03-09T15:39:33.696 INFO:teuthology.orchestra.run.vm05.stdout:mgr.x vm09 *:8443,9283,8765 running (5m) 90s ago 12m 490M - 19.2.3-678-ge911bdeb 654f31e6858e dd2d7e10f3aa 2026-03-09T15:39:33.696 INFO:teuthology.orchestra.run.vm05.stdout:mgr.y vm05 *:8443,9283,8765 running (5m) 90s ago 13m - - 19.2.3-678-ge911bdeb 654f31e6858e db0211ba824d 2026-03-09T15:39:33.696 INFO:teuthology.orchestra.run.vm05.stdout:mon.a vm05 running (5m) 90s ago 13m - 2048M 19.2.3-678-ge911bdeb 654f31e6858e 3fa7c78f8952 2026-03-09T15:39:33.696 INFO:teuthology.orchestra.run.vm05.stdout:mon.b vm09 running (4m) 90s ago 12m 46.8M 2048M 19.2.3-678-ge911bdeb 654f31e6858e 60013cd0d65b 2026-03-09T15:39:33.696 INFO:teuthology.orchestra.run.vm05.stdout:mon.c vm05 running (4m) 90s ago 12m - 2048M 19.2.3-678-ge911bdeb 654f31e6858e c4256ae4b3f9 2026-03-09T15:39:33.696 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.a vm05 *:9100 running (5m) 90s ago 10m - - 1.7.0 72c9c2088986 e730a028339f 2026-03-09T15:39:33.696 INFO:teuthology.orchestra.run.vm05.stdout:node-exporter.b vm09 *:9100 running (5m) 90s ago 10m 9613k - 1.7.0 72c9c2088986 a360ac0679f4 2026-03-09T15:39:33.696 INFO:teuthology.orchestra.run.vm05.stdout:osd.0 vm05 running (4m) 90s ago 12m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 343a65bb3f01 2026-03-09T15:39:33.696 INFO:teuthology.orchestra.run.vm05.stdout:osd.1 vm05 running (4m) 90s ago 12m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 379185d73d4e 2026-03-09T15:39:33.697 INFO:teuthology.orchestra.run.vm05.stdout:osd.2 vm05 running (3m) 90s ago 11m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 101cc91253b5 2026-03-09T15:39:33.697 INFO:teuthology.orchestra.run.vm05.stdout:osd.3 vm05 running (3m) 90s ago 11m - 4096M 19.2.3-678-ge911bdeb 654f31e6858e 7796c013c7ba 2026-03-09T15:39:33.697 INFO:teuthology.orchestra.run.vm05.stdout:osd.4 vm09 running (3m) 90s ago 11m 53.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 7111150665fe 2026-03-09T15:39:33.697 INFO:teuthology.orchestra.run.vm05.stdout:osd.5 vm09 
running (2m) 90s ago 11m 75.2M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 1512c99eec2b 2026-03-09T15:39:33.697 INFO:teuthology.orchestra.run.vm05.stdout:osd.6 vm09 running (2m) 90s ago 11m 71.5M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 07d4d024aa58 2026-03-09T15:39:33.697 INFO:teuthology.orchestra.run.vm05.stdout:osd.7 vm09 running (117s) 90s ago 11m 72.0M 4096M 19.2.3-678-ge911bdeb 654f31e6858e 94ef91f7c845 2026-03-09T15:39:33.697 INFO:teuthology.orchestra.run.vm05.stdout:prometheus.a vm09 *:9095 running (5m) 90s ago 10m 46.4M - 2.51.0 1d3b7f56885b 737f11649a72 2026-03-09T15:39:33.697 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm05.tiuqos vm05 *:8000 running (111s) 90s ago 10m - - 19.2.3-678-ge911bdeb 654f31e6858e bb7d3535719f 2026-03-09T15:39:33.697 INFO:teuthology.orchestra.run.vm05.stdout:rgw.foo.vm09.aljafu vm09 *:8000 running (107s) 90s ago 10m 98.9M - 19.2.3-678-ge911bdeb 654f31e6858e f4ad1c473ec2 2026-03-09T15:39:33.697 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm05.grnlph vm05 *:80 running (109s) 90s ago 10m - - 19.2.3-678-ge911bdeb 654f31e6858e 7cfe7cd6d225 2026-03-09T15:39:33.697 INFO:teuthology.orchestra.run.vm05.stdout:rgw.smpl.vm09.mkjxeh vm09 *:80 running (105s) 90s ago 10m 100M - 19.2.3-678-ge911bdeb 654f31e6858e fcf3feba6e80 2026-03-09T15:39:33.782 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions' 2026-03-09T15:39:33.991 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:33 vm05 ceph-mon[86498]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:33.991 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:33 vm05 ceph-mon[88323]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:34.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:33 vm09 ceph-mon[77297]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: "mon": { 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 3 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: "mgr": { 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 2 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: "osd": { 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 8 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: "rgw": { 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 4 2026-03-09T15:39:34.358 
INFO:teuthology.orchestra.run.vm05.stdout: }, 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: "overall": { 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)": 17 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout: } 2026-03-09T15:39:34.358 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:39:34.436 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch upgrade status' 2026-03-09T15:39:34.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:34 vm05 ceph-mon[86498]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:34.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:34 vm05 ceph-mon[86498]: from='client.44497 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:34.964 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:34 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/4249375811' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:39:34.964 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:34 vm05 ceph-mon[88323]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:34.975 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:39:34.975 INFO:teuthology.orchestra.run.vm05.stdout: "target_image": "quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df", 2026-03-09T15:39:34.975 INFO:teuthology.orchestra.run.vm05.stdout: "in_progress": true, 2026-03-09T15:39:34.975 INFO:teuthology.orchestra.run.vm05.stdout: "which": "Upgrading all daemon types on all hosts", 2026-03-09T15:39:34.975 INFO:teuthology.orchestra.run.vm05.stdout: "services_complete": [ 2026-03-09T15:39:34.975 INFO:teuthology.orchestra.run.vm05.stdout: "osd", 2026-03-09T15:39:34.975 INFO:teuthology.orchestra.run.vm05.stdout: "mgr", 2026-03-09T15:39:34.976 INFO:teuthology.orchestra.run.vm05.stdout: "mon", 2026-03-09T15:39:34.976 INFO:teuthology.orchestra.run.vm05.stdout: "rgw" 2026-03-09T15:39:34.976 INFO:teuthology.orchestra.run.vm05.stdout: ], 2026-03-09T15:39:34.976 INFO:teuthology.orchestra.run.vm05.stdout: "progress": "17/23 daemons upgraded", 2026-03-09T15:39:34.976 INFO:teuthology.orchestra.run.vm05.stdout: "message": "Error: UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm05.rfsich on host vm05 failed.", 2026-03-09T15:39:34.976 INFO:teuthology.orchestra.run.vm05.stdout: "is_paused": true 2026-03-09T15:39:34.976 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:39:35.050 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph health detail' 2026-03-09T15:39:35.231 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:34 vm05 ceph-mon[88323]: from='client.44497 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:35.231 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:34 vm05 ceph-mon[88323]: 
from='client.? 192.168.123.105:0/4249375811' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:39:35.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:34 vm09 ceph-mon[77297]: pgmap v205: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:35.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:34 vm09 ceph-mon[77297]: from='client.44497 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:35.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:34 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/4249375811' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout:HEALTH_WARN 1 failed cephadm daemon(s); Upgrading daemon iscsi.foo.vm05.rfsich on host vm05 failed. 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout: daemon iscsi.foo.vm05.rfsich on vm05 is in unknown state 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout:[WRN] UPGRADE_REDEPLOY_DAEMON: Upgrading daemon iscsi.foo.vm05.rfsich on host vm05 failed. 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout: Upgrade daemon: iscsi.foo.vm05.rfsich: cephadm exited with an error code: 1, stderr: Redeploy daemon iscsi.foo.vm05.rfsich ... 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout:Creating ceph-iscsi config... 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout:Write file: /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/iscsi.foo.vm05.rfsich/iscsi-gateway.cfg 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout:Write file: /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/iscsi.foo.vm05.rfsich/tcmu-runner-entrypoint.sh 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout:Failed to trim old cgroups /sys/fs/cgroup/system.slice/system-ceph\x2d452f6a00\x2d1bcc\x2d11f1\x2da1ee\x2d7f1a2af01dea.slice/ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@iscsi.foo.vm05.rfsich.service 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout:Non-zero exit code 1 from systemctl start ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@iscsi.foo.vm05.rfsich 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout:systemctl: stderr Job for ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@iscsi.foo.vm05.rfsich.service failed because the control process exited with error code. 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout:systemctl: stderr See "systemctl status ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@iscsi.foo.vm05.rfsich.service" and "journalctl -xeu ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@iscsi.foo.vm05.rfsich.service" for details. 
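At this point the upgrade is paused on the failed iscsi daemon (the ceph orch upgrade status output above shows "is_paused": true), and the cephadm traceback that follows is the tail of the same redeploy failure. Outside of this automated run, the usual next steps would be to inspect the daemon on the host and, once fixed, resume the upgrade. A sketch only, built from the unit names quoted above plus standard orch commands; these recovery steps are not part of this test's task list.

    # Inspect the failed unit on vm05 (the exact unit name is printed above).
    sudo systemctl status 'ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@iscsi.foo.vm05.rfsich.service'
    sudo journalctl -xeu 'ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@iscsi.foo.vm05.rfsich.service'

    # Ask the orchestrator to retry the daemon, then continue the paused upgrade.
    ceph orch daemon redeploy iscsi.foo.vm05.rfsich
    ceph orch upgrade resume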
2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout:Traceback (most recent call last): 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout: File "/usr/lib64/python3.9/runpy.py", line 197, in _run_module_as_main 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout: return _run_code(code, main_globals, None, 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout: File "/usr/lib64/python3.9/runpy.py", line 87, in _run_code 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout: exec(code, run_globals) 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5592, in 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 5580, in main 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3062, in command_deploy_from 2026-03-09T15:39:35.672 INFO:teuthology.orchestra.run.vm05.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3097, in _common_deploy 2026-03-09T15:39:35.673 INFO:teuthology.orchestra.run.vm05.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 3117, in _deploy_daemon_container 2026-03-09T15:39:35.673 INFO:teuthology.orchestra.run.vm05.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1090, in deploy_daemon 2026-03-09T15:39:35.673 INFO:teuthology.orchestra.run.vm05.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/__main__.py", line 1237, in deploy_daemon_units 2026-03-09T15:39:35.673 INFO:teuthology.orchestra.run.vm05.stdout: File "/tmp/tmpbhp06bz6.cephadm.build/app/cephadmlib/call_wrappers.py", line 307, in call_throws 2026-03-09T15:39:35.673 INFO:teuthology.orchestra.run.vm05.stdout:RuntimeError: Failed command: systemctl start ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@iscsi.foo.vm05.rfsich: Job for ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@iscsi.foo.vm05.rfsich.service failed because the control process exited with error code. 2026-03-09T15:39:35.673 INFO:teuthology.orchestra.run.vm05.stdout:See "systemctl status ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@iscsi.foo.vm05.rfsich.service" and "journalctl -xeu ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@iscsi.foo.vm05.rfsich.service" for details. 2026-03-09T15:39:35.735 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | length == 1'"'"'' 2026-03-09T15:39:36.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:35 vm05 ceph-mon[86498]: from='client.34509 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:36.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:35 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/3427009658' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:39:36.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:35 vm05 ceph-mon[88323]: from='client.34509 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:36.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:35 vm05 ceph-mon[88323]: from='client.? 
192.168.123.105:0/3427009658' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:39:36.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:35 vm09 ceph-mon[77297]: from='client.34509 -' entity='client.admin' cmd=[{"prefix": "orch upgrade status", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:36.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:35 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/3427009658' entity='client.admin' cmd=[{"prefix": "health", "detail": "detail"}]: dispatch 2026-03-09T15:39:36.331 INFO:teuthology.orchestra.run.vm05.stdout:true 2026-03-09T15:39:36.391 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph versions | jq -e '"'"'.overall | keys'"'"' | grep $sha1' 2026-03-09T15:39:36.953 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:36 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:36.951Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:36.962 INFO:teuthology.orchestra.run.vm05.stdout: "ceph version 19.2.3-678-ge911bdeb (e911bdebe5c8faa3800735d1568fcdca65db60df) squid (stable)" 2026-03-09T15:39:37.007 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -e sha1=e911bdebe5c8faa3800735d1568fcdca65db60df -- bash -c 'ceph orch ls | grep '"'"'^osd '"'"'' 2026-03-09T15:39:37.207 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:36 vm05 ceph-mon[86498]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:37.207 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:36 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/3928415075' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:39:37.207 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:36 vm05 ceph-mon[86498]: from='client.? 192.168.123.105:0/2500046738' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:39:37.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:36 vm05 ceph-mon[88323]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:37.207 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:36 vm05 ceph-mon[88323]: from='client.? 192.168.123.105:0/3928415075' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:39:37.208 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:36 vm05 ceph-mon[88323]: from='client.? 
192.168.123.105:0/2500046738' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:39:37.208 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:36 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:36.953Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:36 vm09 ceph-mon[77297]: pgmap v206: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:36 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/3928415075' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:39:37.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:36 vm09 ceph-mon[77297]: from='client.? 192.168.123.105:0/2500046738' entity='client.admin' cmd=[{"prefix": "versions"}]: dispatch 2026-03-09T15:39:37.544 INFO:teuthology.orchestra.run.vm05.stdout:osd 8 94s ago - 2026-03-09T15:39:37.600 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-03-09T15:39:37.602 INFO:tasks.cephadm:Running commands on role mon.a host ubuntu@vm05.local 2026-03-09T15:39:37.602 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- bash -c 'ceph orch upgrade ls' 2026-03-09T15:39:39.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:38 vm05 ceph-mon[86498]: from='client.34530 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:39.234 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:38 vm05 ceph-mon[86498]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:39.235 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:38 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:38.845Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:39.235 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:38 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:38.848Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:39.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:38 vm05 ceph-mon[88323]: from='client.34530 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:39.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:38 vm05 ceph-mon[88323]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB 
used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:39.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:38 vm09 ceph-mon[77297]: from='client.34530 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:39.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:38 vm09 ceph-mon[77297]: pgmap v207: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:39.553 INFO:teuthology.orchestra.run.vm05.stdout:{ 2026-03-09T15:39:39.553 INFO:teuthology.orchestra.run.vm05.stdout: "image": "quay.io/ceph/ceph", 2026-03-09T15:39:39.554 INFO:teuthology.orchestra.run.vm05.stdout: "registry": "quay.io", 2026-03-09T15:39:39.554 INFO:teuthology.orchestra.run.vm05.stdout: "bare_image": "ceph/ceph", 2026-03-09T15:39:39.554 INFO:teuthology.orchestra.run.vm05.stdout: "versions": [ 2026-03-09T15:39:39.554 INFO:teuthology.orchestra.run.vm05.stdout: "20.2.0", 2026-03-09T15:39:39.554 INFO:teuthology.orchestra.run.vm05.stdout: "20.1.1", 2026-03-09T15:39:39.554 INFO:teuthology.orchestra.run.vm05.stdout: "20.1.0", 2026-03-09T15:39:39.554 INFO:teuthology.orchestra.run.vm05.stdout: "19.2.3", 2026-03-09T15:39:39.554 INFO:teuthology.orchestra.run.vm05.stdout: "19.2.2", 2026-03-09T15:39:39.554 INFO:teuthology.orchestra.run.vm05.stdout: "19.2.1", 2026-03-09T15:39:39.554 INFO:teuthology.orchestra.run.vm05.stdout: "19.2.0" 2026-03-09T15:39:39.554 INFO:teuthology.orchestra.run.vm05.stdout: ] 2026-03-09T15:39:39.554 INFO:teuthology.orchestra.run.vm05.stdout:} 2026-03-09T15:39:39.602 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- bash -c 'ceph orch upgrade ls --image quay.io/ceph/ceph --show-all-versions | grep 16.2.0' 2026-03-09T15:39:40.235 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:39 vm05 ceph-mon[86498]: from='client.44539 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:40.235 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:39 vm05 ceph-mon[88323]: from='client.44539 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:40.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:39 vm09 ceph-mon[77297]: from='client.44539 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:41.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:41 vm09 ceph-mon[77297]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:41.484 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:41 vm05 ceph-mon[86498]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:41.484 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:41 vm05 ceph-mon[88323]: pgmap v208: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 853 B/s rd, 0 op/s 2026-03-09T15:39:41.631 INFO:teuthology.orchestra.run.vm05.stdout: "16.2.0", 2026-03-09T15:39:41.691 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- bash -c 'ceph orch upgrade 
ls --image quay.io/ceph/ceph --tags | grep v16.2.2' 2026-03-09T15:39:42.136 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:42 vm05 ceph-mon[86498]: from='client.44483 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:42.136 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:42 vm05 ceph-mon[88323]: from='client.44483 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:42.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:42 vm09 ceph-mon[77297]: from='client.44483 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "show_all_versions": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:42.735 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:42 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: ::ffff:192.168.123.109 - - [09/Mar/2026:15:39:42] "GET /metrics HTTP/1.1" 200 38330 "" "Prometheus/2.51.0" 2026-03-09T15:39:43.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:43 vm09 ceph-mon[77297]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:43.484 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:43 vm05 ceph-mon[86498]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:43.484 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:43 vm05 ceph-mon[88323]: pgmap v209: 161 pgs: 161 active+clean; 457 KiB data, 295 MiB used, 160 GiB / 160 GiB avail; 1.2 KiB/s rd, 1 op/s 2026-03-09T15:39:43.764 INFO:teuthology.orchestra.run.vm05.stdout: "v16.2.2", 2026-03-09T15:39:43.765 INFO:teuthology.orchestra.run.vm05.stdout: "v16.2.2-20210505", 2026-03-09T15:39:43.825 DEBUG:teuthology.run_tasks:Unwinding manager cephadm 2026-03-09T15:39:43.830 INFO:tasks.cephadm:Teardown begin 2026-03-09T15:39:43.830 DEBUG:teuthology.orchestra.run.vm05:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T15:39:43.861 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T15:39:43.889 INFO:tasks.cephadm:Disabling cephadm mgr module 2026-03-09T15:39:43.889 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm --image quay.io/ceph/ceph:v17.2.0 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea -- ceph mgr module disable cephadm 2026-03-09T15:39:44.053 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:44 vm05 ceph-mon[86498]: from='client.44551 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:44.054 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:44 vm05 ceph-mon[86498]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:44.054 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:44 vm05 ceph-mon[88323]: from='client.44551 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:44.054 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:44 vm05 ceph-mon[88323]: from='client.44437 -' 
entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:44.165 INFO:teuthology.orchestra.run.vm05.stderr:Error: statfs /etc/ceph/ceph.conf: no such file or directory 2026-03-09T15:39:44.182 DEBUG:teuthology.orchestra.run:got remote process result: 125 2026-03-09T15:39:44.182 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 2026-03-09T15:39:44.182 DEBUG:teuthology.orchestra.run.vm05:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-09T15:39:44.196 DEBUG:teuthology.orchestra.run.vm09:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-03-09T15:39:44.212 INFO:tasks.cephadm:Stopping all daemons... 2026-03-09T15:39:44.212 INFO:tasks.cephadm.mon.a:Stopping mon.a... 2026-03-09T15:39:44.212 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.a 2026-03-09T15:39:44.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:44 vm09 ceph-mon[77297]: from='client.44551 -' entity='client.admin' cmd=[{"prefix": "orch upgrade ls", "image": "quay.io/ceph/ceph", "tags": true, "target": ["mon-mgr", ""]}]: dispatch 2026-03-09T15:39:44.311 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:44 vm09 ceph-mon[77297]: from='client.44437 -' entity='client.iscsi.foo.vm05.rfsich' cmd=[{"prefix": "service status", "format": "json"}]: dispatch 2026-03-09T15:39:44.345 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:44 vm05 systemd[1]: Stopping Ceph mon.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:39:44.600 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a[86494]: 2026-03-09T15:39:44.345+0000 7f71e96cc640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.a -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:39:44.600 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a[86494]: 2026-03-09T15:39:44.345+0000 7f71e96cc640 -1 mon.a@0(leader) e4 *** Got Signal Terminated *** 2026-03-09T15:39:44.600 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:44 vm05 podman[106928]: 2026-03-09 15:39:44.564944046 +0000 UTC m=+0.234401919 container died 3fa7c78f895272d79b001056788f41b4aabd674b4bb700ec7286817898aee0cd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, CEPH_REF=squid) 2026-03-09T15:39:44.600 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:44 vm05 podman[106928]: 2026-03-09 15:39:44.57910455 +0000 UTC m=+0.248562433 container remove 
3fa7c78f895272d79b001056788f41b4aabd674b4bb700ec7286817898aee0cd (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.vendor=CentOS, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.schema-version=1.0, ceph=True, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T15:39:44.600 INFO:journalctl@ceph.mon.a.vm05.stdout:Mar 09 15:39:44 vm05 bash[106928]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-a 2026-03-09T15:39:44.600 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:39:44] ENGINE Bus STOPPING 2026-03-09T15:39:44.649 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.a.service' 2026-03-09T15:39:44.687 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:39:44.687 INFO:tasks.cephadm.mon.a:Stopped mon.a 2026-03-09T15:39:44.687 INFO:tasks.cephadm.mon.b:Stopping mon.c... 2026-03-09T15:39:44.687 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.c 2026-03-09T15:39:44.884 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:39:44] ENGINE HTTP Server cherrypy._cpwsgi_server.CPWSGIServer(('::', 9283)) shut down 2026-03-09T15:39:44.884 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:39:44] ENGINE Bus STOPPED 2026-03-09T15:39:44.884 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:39:44] ENGINE Bus STARTING 2026-03-09T15:39:44.884 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:39:44] ENGINE Serving on http://:::9283 2026-03-09T15:39:44.884 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: [09/Mar/2026:15:39:44] ENGINE Bus STARTED 2026-03-09T15:39:44.884 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:44 vm05 systemd[1]: Stopping Ceph mon.c for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
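The alertmanager warnings repeated throughout the run ("lookup host.containers.internal ... no such host") are unrelated to the upgrade result: the ceph-dashboard webhook receiver is configured with a host.containers.internal:8443 URL, and that name does not resolve on these VMs. If this needed chasing, the checks would be along these lines; a sketch only, with the alertmanager.yml path assumed from the usual cephadm data layout under /var/lib/ceph/<fsid>.

    # Does the podman-provided alias resolve on the host at all?
    getent hosts host.containers.internal

    # Inspect the receiver URL generated for alertmanager.a on vm05
    # (path assumed, not taken from this log).
    sudo grep -A2 webhook_configs \
        /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/alertmanager.a/etc/alertmanager/alertmanager.yml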
2026-03-09T15:39:44.884 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-c[88319]: 2026-03-09T15:39:44.847+0000 7f3400d11640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.c -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:39:44.884 INFO:journalctl@ceph.mon.c.vm05.stdout:Mar 09 15:39:44 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-c[88319]: 2026-03-09T15:39:44.847+0000 7f3400d11640 -1 mon.c@1(peon) e4 *** Got Signal Terminated *** 2026-03-09T15:39:44.967 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.c.service' 2026-03-09T15:39:44.998 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:39:44.998 INFO:tasks.cephadm.mon.b:Stopped mon.c 2026-03-09T15:39:44.998 INFO:tasks.cephadm.mon.b:Stopping mon.b... 2026-03-09T15:39:44.998 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.b 2026-03-09T15:39:45.300 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:45 vm09 systemd[1]: Stopping Ceph mon.b for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:39:45.300 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:45 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-b[77293]: 2026-03-09T15:39:45.110+0000 7f55d76db640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mon -n mon.b -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-journald=true --default-mon-cluster-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:39:45.300 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:45 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-b[77293]: 2026-03-09T15:39:45.110+0000 7f55d76db640 -1 mon.b@2(peon) e4 *** Got Signal Terminated *** 2026-03-09T15:39:45.300 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:45 vm09 podman[92155]: 2026-03-09 15:39:45.22400632 +0000 UTC m=+0.126983988 container died 60013cd0d65b34dec5f404979b5fa447448d4fe627f91ff2776a667e88ef54e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-b, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, OSD_FLAVOR=default) 2026-03-09T15:39:45.300 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:45 vm09 podman[92155]: 2026-03-09 15:39:45.241910301 +0000 UTC m=+0.144887969 container remove 60013cd0d65b34dec5f404979b5fa447448d4fe627f91ff2776a667e88ef54e5 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-b, 
org.label-schema.schema-version=1.0, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3) 2026-03-09T15:39:45.300 INFO:journalctl@ceph.mon.b.vm09.stdout:Mar 09 15:39:45 vm09 bash[92155]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mon-b 2026-03-09T15:39:45.307 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mon.b.service' 2026-03-09T15:39:45.346 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:39:45.346 INFO:tasks.cephadm.mon.b:Stopped mon.b 2026-03-09T15:39:45.346 INFO:tasks.cephadm.mgr.y:Stopping mgr.y... 2026-03-09T15:39:45.346 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.y 2026-03-09T15:39:45.604 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.y.service' 2026-03-09T15:39:45.628 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:45 vm05 systemd[1]: Stopping Ceph mgr.y for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:39:45.628 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:39:45.453+0000 7fef7b7eb640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-mgr -n mgr.y -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:39:45.628 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:45 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y[82796]: 2026-03-09T15:39:45.453+0000 7fef7b7eb640 -1 mgr handle_mgr_signal *** Got signal Terminated *** 2026-03-09T15:39:45.628 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:45 vm05 podman[107148]: 2026-03-09 15:39:45.508453513 +0000 UTC m=+0.069634314 container died db0211ba824d7b7b80e6d4f1aaf6b6fdac39f2cddfaf4ee431bf599e7a2f3901 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y, CEPH_REF=squid, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:39:45.628 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:45 vm05 podman[107148]: 2026-03-09 15:39:45.539286807 +0000 UTC m=+0.100467619 container remove db0211ba824d7b7b80e6d4f1aaf6b6fdac39f2cddfaf4ee431bf599e7a2f3901 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, 
name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, ceph=True, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_REF=squid, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T15:39:45.628 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:45 vm05 bash[107148]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-mgr-y 2026-03-09T15:39:45.628 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:45 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.y.service: Deactivated successfully. 2026-03-09T15:39:45.628 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:45 vm05 systemd[1]: Stopped Ceph mgr.y for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:39:45.628 INFO:journalctl@ceph.mgr.y.vm05.stdout:Mar 09 15:39:45 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.y.service: Consumed 14.741s CPU time. 2026-03-09T15:39:45.637 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:39:45.637 INFO:tasks.cephadm.mgr.y:Stopped mgr.y 2026-03-09T15:39:45.637 INFO:tasks.cephadm.mgr.x:Stopping mgr.x... 2026-03-09T15:39:45.637 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.x 2026-03-09T15:39:45.876 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@mgr.x.service' 2026-03-09T15:39:45.910 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:39:45.910 INFO:tasks.cephadm.mgr.x:Stopped mgr.x 2026-03-09T15:39:45.910 INFO:tasks.cephadm.osd.0:Stopping osd.0... 2026-03-09T15:39:45.910 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.0 2026-03-09T15:39:46.235 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:39:45 vm05 systemd[1]: Stopping Ceph osd.0 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:39:46.235 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:39:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0[90829]: 2026-03-09T15:39:46.023+0000 7f3dcc7f5640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.0 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:39:46.235 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:39:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0[90829]: 2026-03-09T15:39:46.023+0000 7f3dcc7f5640 -1 osd.0 133 *** Got signal Terminated *** 2026-03-09T15:39:46.235 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:39:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0[90829]: 2026-03-09T15:39:46.023+0000 7f3dcc7f5640 -1 osd.0 133 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:39:47.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:46.953Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:47.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:46 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:46.954Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:49.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:48 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:48.848Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:49.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:48 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:48.849Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:51.338 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:39:51 vm05 podman[107252]: 2026-03-09 15:39:51.056121532 +0000 UTC m=+5.044902795 container died 343a65bb3f011bc60ffefb2c065da72d55ab4034c2e52b9d8c0a7686e6197268 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.schema-version=1.0, 
FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid) 2026-03-09T15:39:51.338 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:39:51 vm05 podman[107252]: 2026-03-09 15:39:51.089250318 +0000 UTC m=+5.078031561 container remove 343a65bb3f011bc60ffefb2c065da72d55ab4034c2e52b9d8c0a7686e6197268 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, ceph=True) 2026-03-09T15:39:51.338 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:39:51 vm05 bash[107252]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0 2026-03-09T15:39:51.338 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:39:51 vm05 podman[107320]: 2026-03-09 15:39:51.246712152 +0000 UTC m=+0.018593209 container create 326cc4d038bfaa2286781f2123dea68915801018aebc6295cfd8f407072ef03a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, io.buildah.version=1.41.3, CEPH_REF=squid, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:39:51.338 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:39:51 vm05 podman[107320]: 2026-03-09 15:39:51.286888935 +0000 UTC m=+0.058770002 container init 326cc4d038bfaa2286781f2123dea68915801018aebc6295cfd8f407072ef03a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-deactivate, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default) 2026-03-09T15:39:51.338 
INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:39:51 vm05 podman[107320]: 2026-03-09 15:39:51.291108398 +0000 UTC m=+0.062989455 container start 326cc4d038bfaa2286781f2123dea68915801018aebc6295cfd8f407072ef03a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-deactivate, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0) 2026-03-09T15:39:51.338 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:39:51 vm05 podman[107320]: 2026-03-09 15:39:51.29277069 +0000 UTC m=+0.064651747 container attach 326cc4d038bfaa2286781f2123dea68915801018aebc6295cfd8f407072ef03a (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-0-deactivate, io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:39:51.338 INFO:journalctl@ceph.osd.0.vm05.stdout:Mar 09 15:39:51 vm05 podman[107320]: 2026-03-09 15:39:51.239818596 +0000 UTC m=+0.011699664 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:39:51.475 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.0.service' 2026-03-09T15:39:51.511 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:39:51.511 INFO:tasks.cephadm.osd.0:Stopped osd.0 2026-03-09T15:39:51.511 INFO:tasks.cephadm.osd.1:Stopping osd.1... 2026-03-09T15:39:51.511 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.1 2026-03-09T15:39:51.984 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:39:51 vm05 systemd[1]: Stopping Ceph osd.1 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:39:51.984 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:39:51 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1[92781]: 2026-03-09T15:39:51.679+0000 7f8825347640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.1 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:39:51.984 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:39:51 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1[92781]: 2026-03-09T15:39:51.679+0000 7f8825347640 -1 osd.1 133 *** Got signal Terminated *** 2026-03-09T15:39:51.984 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:39:51 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1[92781]: 2026-03-09T15:39:51.679+0000 7f8825347640 -1 osd.1 133 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:39:56.955 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:39:56 vm05 podman[107414]: 2026-03-09 15:39:56.717848325 +0000 UTC m=+5.056380951 container died 379185d73d4e0c4fd2caeb66da15342d8f6393d7d7b12278dd35fc4bf2c227f3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1, org.label-schema.vendor=CentOS, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team ) 2026-03-09T15:39:56.955 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:39:56 vm05 podman[107414]: 2026-03-09 15:39:56.753630975 +0000 UTC m=+5.092163601 container remove 379185d73d4e0c4fd2caeb66da15342d8f6393d7d7b12278dd35fc4bf2c227f3 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1, ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, CEPH_REF=squid, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.documentation=https://docs.ceph.com/) 2026-03-09T15:39:56.955 INFO:journalctl@ceph.osd.1.vm05.stdout:Mar 09 15:39:56 vm05 bash[107414]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-1 2026-03-09T15:39:57.176 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.1.service' 2026-03-09T15:39:57.210 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:56 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:56.954Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:57.211 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:56 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:56.956Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:57.213 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:39:57.213 INFO:tasks.cephadm.osd.1:Stopped osd.1 2026-03-09T15:39:57.213 INFO:tasks.cephadm.osd.2:Stopping osd.2... 2026-03-09T15:39:57.213 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.2 2026-03-09T15:39:57.484 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:39:57 vm05 systemd[1]: Stopping Ceph osd.2 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:39:57.485 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:39:57 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2[94895]: 2026-03-09T15:39:57.369+0000 7fb3a5de3640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.2 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:39:57.485 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:39:57 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2[94895]: 2026-03-09T15:39:57.369+0000 7fb3a5de3640 -1 osd.2 133 *** Got signal Terminated *** 2026-03-09T15:39:57.485 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:39:57 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2[94895]: 2026-03-09T15:39:57.369+0000 7fb3a5de3640 -1 osd.2 133 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:39:59.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:58 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:58.849Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:39:59.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:39:58 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:39:58.850Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://vm05.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.105:8443: connect: connection refused" 2026-03-09T15:40:02.663 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:40:02 vm05 podman[107587]: 2026-03-09 15:40:02.412485455 +0000 UTC m=+5.057560407 container died 101cc91253b59d1d8826ffc42e5dc712b06d246879672f31c1c93053e95905b0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3, 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, ceph=True) 2026-03-09T15:40:02.663 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:40:02 vm05 podman[107587]: 2026-03-09 15:40:02.466514381 +0000 UTC m=+5.111589333 container remove 101cc91253b59d1d8826ffc42e5dc712b06d246879672f31c1c93053e95905b0 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2, FROM_IMAGE=quay.io/centos/centos:stream9, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, org.label-schema.name=CentOS Stream 9 Base Image, io.buildah.version=1.41.3, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid) 2026-03-09T15:40:02.663 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:40:02 vm05 bash[107587]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2 2026-03-09T15:40:02.663 INFO:journalctl@ceph.osd.2.vm05.stdout:Mar 09 15:40:02 vm05 podman[107654]: 2026-03-09 15:40:02.627476278 +0000 UTC m=+0.018468154 container create f4d8fe7bc5ad730ce123017dba7c4a12cd4bc56993e15b76afccf2e8a3dd2396 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-2-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, ceph=True, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/) 2026-03-09T15:40:02.882 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.2.service' 2026-03-09T15:40:02.920 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:40:02.920 INFO:tasks.cephadm.osd.2:Stopped osd.2 2026-03-09T15:40:02.920 INFO:tasks.cephadm.osd.3:Stopping osd.3... 2026-03-09T15:40:02.920 DEBUG:teuthology.orchestra.run.vm05:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.3 2026-03-09T15:40:03.484 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:40:02 vm05 systemd[1]: Stopping Ceph osd.3 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:40:03.484 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:40:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3[96970]: 2026-03-09T15:40:03.070+0000 7f67f7e96640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.3 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:40:03.484 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:40:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3[96970]: 2026-03-09T15:40:03.070+0000 7f67f7e96640 -1 osd.3 133 *** Got signal Terminated *** 2026-03-09T15:40:03.484 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:40:03 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3[96970]: 2026-03-09T15:40:03.070+0000 7f67f7e96640 -1 osd.3 133 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:40:07.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:06 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:40:06.957Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:40:07.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:06 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:40:06.958Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:40:08.369 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:40:08 vm05 podman[107749]: 2026-03-09 15:40:08.100759957 +0000 UTC m=+5.044275759 container died 7796c013c7ba6af2769d6c7596349134cd8ee02ac202ebfc11fdc0fb9f6e705f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.license=GPLv2, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid) 2026-03-09T15:40:08.369 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:40:08 vm05 podman[107749]: 2026-03-09 15:40:08.126609637 +0000 UTC m=+5.070125450 container remove 7796c013c7ba6af2769d6c7596349134cd8ee02ac202ebfc11fdc0fb9f6e705f (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.label-schema.vendor=CentOS, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, ceph=True, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , 
GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_REF=squid, org.label-schema.schema-version=1.0) 2026-03-09T15:40:08.369 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:40:08 vm05 bash[107749]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3 2026-03-09T15:40:08.369 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:40:08 vm05 podman[107818]: 2026-03-09 15:40:08.278989599 +0000 UTC m=+0.020343272 container create 2843b7e579af8d0ea6fd805f28f9a251aff97380b73f20aac788f2db96a83472 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-deactivate, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 2026-03-09T15:40:08.369 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:40:08 vm05 podman[107818]: 2026-03-09 15:40:08.317619728 +0000 UTC m=+0.058973420 container init 2843b7e579af8d0ea6fd805f28f9a251aff97380b73f20aac788f2db96a83472 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, ceph=True, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.documentation=https://docs.ceph.com/, io.buildah.version=1.41.3, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T15:40:08.369 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:40:08 vm05 podman[107818]: 2026-03-09 15:40:08.325751767 +0000 UTC m=+0.067105449 container start 2843b7e579af8d0ea6fd805f28f9a251aff97380b73f20aac788f2db96a83472 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-deactivate, CEPH_REF=squid, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.schema-version=1.0, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.build-date=20260223, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T15:40:08.369 
INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:40:08 vm05 podman[107818]: 2026-03-09 15:40:08.328932721 +0000 UTC m=+0.070286403 container attach 2843b7e579af8d0ea6fd805f28f9a251aff97380b73f20aac788f2db96a83472 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-3-deactivate, OSD_FLAVOR=default, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.opencontainers.image.authors=Ceph Release Team , ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.label-schema.license=GPLv2) 2026-03-09T15:40:08.369 INFO:journalctl@ceph.osd.3.vm05.stdout:Mar 09 15:40:08 vm05 podman[107818]: 2026-03-09 15:40:08.269860117 +0000 UTC m=+0.011213809 image pull 654f31e6858eb235bbece362255b685a945f2b6a367e2b88c4930c984fbb214c quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df 2026-03-09T15:40:08.490 DEBUG:teuthology.orchestra.run.vm05:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.3.service' 2026-03-09T15:40:08.527 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:40:08.527 INFO:tasks.cephadm.osd.3:Stopped osd.3 2026-03-09T15:40:08.527 INFO:tasks.cephadm.osd.4:Stopping osd.4... 2026-03-09T15:40:08.527 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.4 2026-03-09T15:40:08.811 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:40:08 vm09 systemd[1]: Stopping Ceph osd.4 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:40:08.811 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:40:08 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4[79703]: 2026-03-09T15:40:08.642+0000 7f757b385640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.4 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:40:08.811 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:40:08 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4[79703]: 2026-03-09T15:40:08.642+0000 7f757b385640 -1 osd.4 133 *** Got signal Terminated *** 2026-03-09T15:40:08.811 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:40:08 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4[79703]: 2026-03-09T15:40:08.642+0000 7f757b385640 -1 osd.4 133 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:40:09.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:40:08.849Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[1]: notify retry canceled after 8 attempts: Post \"https://vm05.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.105:8443: connect: connection refused" 2026-03-09T15:40:09.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:08 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:40:08.850Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://vm05.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.105:8443: connect: connection refused" 2026-03-09T15:40:13.936 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:40:13 vm09 podman[92373]: 2026-03-09 15:40:13.664635957 +0000 UTC m=+5.037005018 container died 7111150665fe25176d5ed83b9d97267ada4b120b36ee2efc22a014a6ccd248c8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0) 2026-03-09T15:40:13.936 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:40:13 vm09 podman[92373]: 2026-03-09 15:40:13.690876617 +0000 UTC m=+5.063245668 container remove 7111150665fe25176d5ed83b9d97267ada4b120b36ee2efc22a014a6ccd248c8 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, 
CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid) 2026-03-09T15:40:13.936 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:40:13 vm09 bash[92373]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4 2026-03-09T15:40:13.936 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:40:13 vm09 podman[92442]: 2026-03-09 15:40:13.844946839 +0000 UTC m=+0.019547899 container create 20759936c2837fb48be68656741c74bd71afd92d3e7a78ea9694cb31d437bf5b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-deactivate, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, io.buildah.version=1.41.3, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:40:13.936 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:40:13 vm09 podman[92442]: 2026-03-09 15:40:13.883256679 +0000 UTC m=+0.057857738 container init 20759936c2837fb48be68656741c74bd71afd92d3e7a78ea9694cb31d437bf5b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.license=GPLv2, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_REF=squid, io.buildah.version=1.41.3, org.label-schema.build-date=20260223) 2026-03-09T15:40:13.936 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:40:13 vm09 podman[92442]: 2026-03-09 15:40:13.886124765 +0000 UTC m=+0.060725824 container start 20759936c2837fb48be68656741c74bd71afd92d3e7a78ea9694cb31d437bf5b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, io.buildah.version=1.41.3, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.build-date=20260223, ceph=True, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default) 2026-03-09T15:40:13.936 INFO:journalctl@ceph.osd.4.vm09.stdout:Mar 09 15:40:13 vm09 podman[92442]: 
2026-03-09 15:40:13.889371234 +0000 UTC m=+0.063972302 container attach 20759936c2837fb48be68656741c74bd71afd92d3e7a78ea9694cb31d437bf5b (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-4-deactivate, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_REF=squid, org.label-schema.build-date=20260223, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.name=CentOS Stream 9 Base Image, OSD_FLAVOR=default, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , ceph=True, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:40:13.936 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:13 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:40:13.639+0000 7fdefa6c9640 -1 osd.5 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:48.285712+0000 front 2026-03-09T15:39:48.285738+0000 (oldest deadline 2026-03-09T15:40:12.985631+0000) 2026-03-09T15:40:14.050 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.4.service' 2026-03-09T15:40:14.087 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:40:14.087 INFO:tasks.cephadm.osd.4:Stopped osd.4 2026-03-09T15:40:14.087 INFO:tasks.cephadm.osd.5:Stopping osd.5... 2026-03-09T15:40:14.087 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.5 2026-03-09T15:40:14.231 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:14 vm09 systemd[1]: Stopping Ceph osd.5 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
2026-03-09T15:40:14.232 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:40:14.230+0000 7fdefe8b1640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.5 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:40:14.232 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:40:14.230+0000 7fdefe8b1640 -1 osd.5 133 *** Got signal Terminated *** 2026-03-09T15:40:14.232 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:40:14.230+0000 7fdefe8b1640 -1 osd.5 133 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:40:15.061 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:14 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:40:14.664+0000 7fdefa6c9640 -1 osd.5 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:48.285712+0000 front 2026-03-09T15:39:48.285738+0000 (oldest deadline 2026-03-09T15:40:12.985631+0000) 2026-03-09T15:40:15.811 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:40:15.686+0000 7fdefa6c9640 -1 osd.5 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:48.285712+0000 front 2026-03-09T15:39:48.285738+0000 (oldest deadline 2026-03-09T15:40:12.985631+0000) 2026-03-09T15:40:15.811 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:15 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:15.504+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:49.462461+0000 front 2026-03-09T15:39:49.462583+0000 (oldest deadline 2026-03-09T15:40:14.762234+0000) 2026-03-09T15:40:16.509 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:16.191+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:16.811 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:40:16.655+0000 7fdefa6c9640 -1 osd.5 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:48.285712+0000 front 2026-03-09T15:39:48.285738+0000 (oldest deadline 2026-03-09T15:40:12.985631+0000) 2026-03-09T15:40:16.811 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:16 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:16.508+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:49.462461+0000 front 2026-03-09T15:39:49.462583+0000 (oldest deadline 2026-03-09T15:40:14.762234+0000) 2026-03-09T15:40:17.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:16 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:40:16.958Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 8 attempts: Post 
\"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:40:17.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:16 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:40:16.959Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:40:17.491 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:17.198+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:17.491 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:17.198+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:17.811 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:40:17.672+0000 7fdefa6c9640 -1 osd.5 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:48.285712+0000 front 2026-03-09T15:39:48.285738+0000 (oldest deadline 2026-03-09T15:40:12.985631+0000) 2026-03-09T15:40:17.811 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:17 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:17.489+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:49.462461+0000 front 2026-03-09T15:39:49.462583+0000 (oldest deadline 2026-03-09T15:40:14.762234+0000) 2026-03-09T15:40:18.499 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:18.183+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:18.499 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:18.183+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:18.750 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:40:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:40:18.743Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph-exporter msg="Unable to refresh target groups" err="Get \"http://192.168.123.105:8765/sd/prometheus/sd-config?service=ceph-exporter\": dial tcp 192.168.123.105:8765: connect: connection refused" 2026-03-09T15:40:18.750 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:40:18 vm09 
ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:40:18.743Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nfs msg="Unable to refresh target groups" err="Get \"http://192.168.123.105:8765/sd/prometheus/sd-config?service=nfs\": dial tcp 192.168.123.105:8765: connect: connection refused" 2026-03-09T15:40:18.750 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:40:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:40:18.744Z caller=refresh.go:90 level=error component="discovery manager notify" discovery=http config=config-0 msg="Unable to refresh target groups" err="Get \"http://192.168.123.105:8765/sd/prometheus/sd-config?service=alertmanager\": dial tcp 192.168.123.105:8765: connect: connection refused" 2026-03-09T15:40:18.750 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:40:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:40:18.744Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=node msg="Unable to refresh target groups" err="Get \"http://192.168.123.105:8765/sd/prometheus/sd-config?service=node-exporter\": dial tcp 192.168.123.105:8765: connect: connection refused" 2026-03-09T15:40:18.750 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:40:18.719+0000 7fdefa6c9640 -1 osd.5 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:48.285712+0000 front 2026-03-09T15:39:48.285738+0000 (oldest deadline 2026-03-09T15:40:12.985631+0000) 2026-03-09T15:40:18.750 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5[81624]: 2026-03-09T15:40:18.719+0000 7fdefa6c9640 -1 osd.5 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:52.986163+0000 front 2026-03-09T15:39:52.986175+0000 (oldest deadline 2026-03-09T15:40:18.285842+0000) 2026-03-09T15:40:18.751 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:18.498+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:49.462461+0000 front 2026-03-09T15:39:49.462583+0000 (oldest deadline 2026-03-09T15:40:14.762234+0000) 2026-03-09T15:40:19.060 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:40:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:40:18.751Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=nvmeof msg="Unable to refresh target groups" err="Get \"http://192.168.123.105:8765/sd/prometheus/sd-config?service=nvmeof\": dial tcp 192.168.123.105:8765: connect: connection refused" 2026-03-09T15:40:19.060 INFO:journalctl@ceph.prometheus.a.vm09.stdout:Mar 09 15:40:18 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-prometheus-a[74291]: ts=2026-03-09T15:40:18.751Z caller=refresh.go:90 level=error component="discovery manager scrape" discovery=http config=ceph msg="Unable to refresh target groups" err="Get \"http://192.168.123.105:8765/sd/prometheus/sd-config?service=mgr-prometheus\": dial tcp 192.168.123.105:8765: connect: connection refused" 2026-03-09T15:40:19.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:18 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: 
ts=2026-03-09T15:40:18.850Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[1]: notify retry canceled after 7 attempts: Post \"https://vm05.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.105:8443: connect: connection refused" 2026-03-09T15:40:19.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:18 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:40:18.851Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://vm05.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.105:8443: connect: connection refused" 2026-03-09T15:40:19.489 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:19 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:19.224+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:19.489 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:19 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:19.224+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:19.489 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:19 vm09 podman[92537]: 2026-03-09 15:40:19.25831465 +0000 UTC m=+5.041869025 container died 1512c99eec2be0e1e78406a32bd794eeacb47252968133904ba36d1efaea513e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, io.buildah.version=1.41.3, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9) 2026-03-09T15:40:19.489 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:19 vm09 podman[92537]: 2026-03-09 15:40:19.289884027 +0000 UTC m=+5.073438402 container remove 1512c99eec2be0e1e78406a32bd794eeacb47252968133904ba36d1efaea513e (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df) 
2026-03-09T15:40:19.489 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:19 vm09 bash[92537]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5 2026-03-09T15:40:19.489 INFO:journalctl@ceph.osd.5.vm09.stdout:Mar 09 15:40:19 vm09 podman[92604]: 2026-03-09 15:40:19.462268471 +0000 UTC m=+0.020782558 container create c67d39f0d8912fc5b0e4a8ae129ee16e7e5e283e94b1b387cf61e3be24169759 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-5-deactivate, org.label-schema.license=GPLv2, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, ceph=True, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, CEPH_REF=squid, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.schema-version=1.0, io.buildah.version=1.41.3) 2026-03-09T15:40:19.683 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.5.service' 2026-03-09T15:40:19.719 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:40:19.719 INFO:tasks.cephadm.osd.5:Stopped osd.5 2026-03-09T15:40:19.719 INFO:tasks.cephadm.osd.6:Stopping osd.6... 2026-03-09T15:40:19.719 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.6 2026-03-09T15:40:19.781 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:19 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:19.488+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:49.462461+0000 front 2026-03-09T15:39:49.462583+0000 (oldest deadline 2026-03-09T15:40:14.762234+0000) 2026-03-09T15:40:20.061 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:19 vm09 systemd[1]: Stopping Ceph osd.6 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
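The teardown pattern repeated for each daemon is visible here: teuthology stops the per-daemon systemd unit ceph-<fsid>@<name>, then kills the journalctl follower it had attached to that unit. A minimal sketch of that sequence, assuming it runs on the target host with sudo rights (stop_daemon is a hypothetical helper, not teuthology's actual code):

import subprocess

FSID = "452f6a00-1bcc-11f1-a1ee-7f1a2af01dea"  # fsid from this run

def stop_daemon(name: str) -> None:
    unit = f"ceph-{FSID}@{name}"
    # 1. stop the daemon's unit, as "systemctl stop ceph-<fsid>@osd.6" above
    subprocess.run(["sudo", "systemctl", "stop", unit], check=True)
    # 2. kill the "journalctl -f -n 0 -u <unit>.service" follower
    subprocess.run(
        ["sudo", "pkill", "-f", f"journalctl -f -n 0 -u {unit}.service"],
        check=False,  # pkill exits non-zero when nothing matched; that is fine here
    )

for name in ("osd.5", "osd.6", "osd.7"):
    stop_daemon(name)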
2026-03-09T15:40:20.061 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:19 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:19.878+0000 7f3a4f8d8640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.6 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:40:20.061 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:19 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:19.878+0000 7f3a4f8d8640 -1 osd.6 133 *** Got signal Terminated *** 2026-03-09T15:40:20.061 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:19 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:19.878+0000 7f3a4f8d8640 -1 osd.6 133 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:40:20.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:20 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:20.246+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:20.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:20 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:20.246+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:20.561 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:20 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:20.463+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:49.462461+0000 front 2026-03-09T15:39:49.462583+0000 (oldest deadline 2026-03-09T15:40:14.762234+0000) 2026-03-09T15:40:20.561 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:20 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:20.463+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:54.762959+0000 front 2026-03-09T15:39:54.762815+0000 (oldest deadline 2026-03-09T15:40:20.062554+0000) 2026-03-09T15:40:21.560 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:21 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:21.264+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:21.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:21 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:21.264+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:21.561 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:21 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:21.495+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:49.462461+0000 front 2026-03-09T15:39:49.462583+0000 (oldest deadline 
2026-03-09T15:40:14.762234+0000) 2026-03-09T15:40:21.561 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:21 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:21.495+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:54.762959+0000 front 2026-03-09T15:39:54.762815+0000 (oldest deadline 2026-03-09T15:40:20.062554+0000) 2026-03-09T15:40:22.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:22 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:22.291+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:22.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:22 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:22.291+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:22.561 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:22 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:22.492+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:49.462461+0000 front 2026-03-09T15:39:49.462583+0000 (oldest deadline 2026-03-09T15:40:14.762234+0000) 2026-03-09T15:40:22.561 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:22 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:22.492+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:54.762959+0000 front 2026-03-09T15:39:54.762815+0000 (oldest deadline 2026-03-09T15:40:20.062554+0000) 2026-03-09T15:40:23.811 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:23 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:23.341+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:23.811 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:23 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:23.341+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:23.811 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:23 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:23.341+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6822 osd.2 since back 2026-03-09T15:39:59.875887+0000 front 2026-03-09T15:39:59.875999+0000 (oldest deadline 2026-03-09T15:40:22.775465+0000) 2026-03-09T15:40:23.811 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:23 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:23.485+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:49.462461+0000 front 2026-03-09T15:39:49.462583+0000 (oldest deadline 2026-03-09T15:40:14.762234+0000) 2026-03-09T15:40:23.811 
INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:23 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:23.485+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:54.762959+0000 front 2026-03-09T15:39:54.762815+0000 (oldest deadline 2026-03-09T15:40:20.062554+0000) 2026-03-09T15:40:24.811 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:24.318+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:24.811 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:24.318+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:24.811 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:24.318+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6822 osd.2 since back 2026-03-09T15:39:59.875887+0000 front 2026-03-09T15:39:59.875999+0000 (oldest deadline 2026-03-09T15:40:22.775465+0000) 2026-03-09T15:40:24.811 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:24.456+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:49.462461+0000 front 2026-03-09T15:39:49.462583+0000 (oldest deadline 2026-03-09T15:40:14.762234+0000) 2026-03-09T15:40:24.811 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:24.456+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:54.762959+0000 front 2026-03-09T15:39:54.762815+0000 (oldest deadline 2026-03-09T15:40:20.062554+0000) 2026-03-09T15:40:24.811 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:24 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6[83559]: 2026-03-09T15:40:24.456+0000 7f3a4b6f0640 -1 osd.6 133 heartbeat_check: no reply from 192.168.123.105:6822 osd.2 since back 2026-03-09T15:40:00.063107+0000 front 2026-03-09T15:40:00.063141+0000 (oldest deadline 2026-03-09T15:40:23.562776+0000) 2026-03-09T15:40:25.198 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:24 vm09 podman[92699]: 2026-03-09 15:40:24.911570536 +0000 UTC m=+5.050790052 container died 07d4d024aa5895d249670562770990734d969aff707878d2400238d08e9db236 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.vendor=CentOS, OSD_FLAVOR=default, ceph=True, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, 
org.opencontainers.image.documentation=https://docs.ceph.com/, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0) 2026-03-09T15:40:25.198 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:24 vm09 podman[92699]: 2026-03-09 15:40:24.943796105 +0000 UTC m=+5.083015631 container remove 07d4d024aa5895d249670562770990734d969aff707878d2400238d08e9db236 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6, CEPH_REF=squid, ceph=True, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2) 2026-03-09T15:40:25.198 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:24 vm09 bash[92699]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6 2026-03-09T15:40:25.198 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:25 vm09 podman[92765]: 2026-03-09 15:40:25.107144096 +0000 UTC m=+0.019034634 container create 29c2cd960762229f312845527bb704ed887c4ef1927021798b238a71a6948c2c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-deactivate, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.schema-version=1.0, OSD_FLAVOR=default, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid) 2026-03-09T15:40:25.198 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:25 vm09 podman[92765]: 2026-03-09 15:40:25.147047337 +0000 UTC m=+0.058937895 container init 29c2cd960762229f312845527bb704ed887c4ef1927021798b238a71a6948c2c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, io.buildah.version=1.41.3, org.label-schema.vendor=CentOS, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0, ceph=True) 2026-03-09T15:40:25.198 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:25 vm09 podman[92765]: 2026-03-09 15:40:25.15077037 +0000 UTC m=+0.062660918 container start 29c2cd960762229f312845527bb704ed887c4ef1927021798b238a71a6948c2c 
(image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-deactivate, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_REF=squid, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.license=GPLv2, org.label-schema.build-date=20260223, ceph=True, org.label-schema.vendor=CentOS, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.label-schema.name=CentOS Stream 9 Base Image) 2026-03-09T15:40:25.198 INFO:journalctl@ceph.osd.6.vm09.stdout:Mar 09 15:40:25 vm09 podman[92765]: 2026-03-09 15:40:25.157693784 +0000 UTC m=+0.069584332 container attach 29c2cd960762229f312845527bb704ed887c4ef1927021798b238a71a6948c2c (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-6-deactivate, FROM_IMAGE=quay.io/centos/centos:stream9, OSD_FLAVOR=default, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.build-date=20260223, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, ceph=True) 2026-03-09T15:40:25.319 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.6.service' 2026-03-09T15:40:25.398 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:40:25.399 INFO:tasks.cephadm.osd.6:Stopped osd.6 2026-03-09T15:40:25.399 INFO:tasks.cephadm.osd.7:Stopping osd.7... 
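While the OSDs on vm05 are already down, the surviving OSDs on vm09 keep emitting heartbeat_check lines naming the unreachable peer, its heartbeat address, and the oldest missed deadline. A small, hypothetical parser for exactly the line format shown in this log can make the flood easier to read:

import re

# Matches the heartbeat_check format seen in this run (assumed stable here).
HB = re.compile(
    r"heartbeat_check: no reply from (?P<addr>\S+) (?P<peer>osd\.\d+) "
    r"since back (?P<back>\S+) front (?P<front>\S+) "
    r"\(oldest deadline (?P<deadline>\S+)\)"
)

sample = ("osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 "
          "since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 "
          "(oldest deadline 2026-03-09T15:40:15.374429+0000)")
m = HB.search(sample)
if m:
    print(f"{m['peer']} unreachable at {m['addr']} (deadline {m['deadline']})")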
2026-03-09T15:40:25.399 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.7 2026-03-09T15:40:25.466 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:25 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:25.324+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:25.466 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:25 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:25.324+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:25.466 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:25 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:25.324+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6822 osd.2 since back 2026-03-09T15:39:59.875887+0000 front 2026-03-09T15:39:59.875999+0000 (oldest deadline 2026-03-09T15:40:22.775465+0000) 2026-03-09T15:40:25.810 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:25 vm09 systemd[1]: Stopping Ceph osd.7 for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:40:25.811 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:25 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:25.562+0000 7fb38b3b8640 -1 received signal: Terminated from /run/podman-init -- /usr/bin/ceph-osd -n osd.7 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-journald=true --default-log-to-stderr=false (PID: 1) UID: 0 2026-03-09T15:40:25.811 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:25 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:25.562+0000 7fb38b3b8640 -1 osd.7 133 *** Got signal Terminated *** 2026-03-09T15:40:25.811 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:25 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:25.562+0000 7fb38b3b8640 -1 osd.7 133 *** Immediate shutdown (osd_fast_shutdown=true) *** 2026-03-09T15:40:26.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:26 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:26.282+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:26.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:26 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:26.282+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:26.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:26 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:26.282+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6822 osd.2 since back 2026-03-09T15:39:59.875887+0000 front 2026-03-09T15:39:59.875999+0000 (oldest deadline 2026-03-09T15:40:22.775465+0000) 2026-03-09T15:40:27.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 
15:40:26 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:40:26.958Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=6 err="ceph-dashboard/webhook[0]: notify retry canceled after 7 attempts: Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:40:27.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:26 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:40:26.959Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[0] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://host.containers.internal:8443/api/prometheus_receiver\": dial tcp: lookup host.containers.internal on 192.168.123.1:53: no such host" 2026-03-09T15:40:27.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:27 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:27.329+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:27.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:27 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:27.329+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:27.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:27 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:27.329+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6822 osd.2 since back 2026-03-09T15:39:59.875887+0000 front 2026-03-09T15:39:59.875999+0000 (oldest deadline 2026-03-09T15:40:22.775465+0000) 2026-03-09T15:40:28.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:28.303+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:28.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:28.303+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:28.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:28 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:28.303+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6822 osd.2 since back 2026-03-09T15:39:59.875887+0000 front 2026-03-09T15:39:59.875999+0000 (oldest deadline 2026-03-09T15:40:22.775465+0000) 2026-03-09T15:40:29.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:40:28.851Z caller=dispatch.go:352 level=error component=dispatcher msg="Notify for alerts failed" num_alerts=1 err="ceph-dashboard/webhook[1]: notify 
retry canceled after 7 attempts: Post \"https://vm05.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.105:8443: connect: connection refused" 2026-03-09T15:40:29.234 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:28 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:40:28.851Z caller=notify.go:732 level=warn component=dispatcher receiver=ceph-dashboard integration=webhook[1] msg="Notify attempt failed, will retry later" attempts=1 err="Post \"https://vm05.local:8443/api/prometheus_receiver\": dial tcp 192.168.123.105:8443: connect: connection refused" 2026-03-09T15:40:29.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:29 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:29.285+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:29.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:29 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:29.285+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:29.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:29 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:29.285+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6822 osd.2 since back 2026-03-09T15:39:59.875887+0000 front 2026-03-09T15:39:59.875999+0000 (oldest deadline 2026-03-09T15:40:22.775465+0000) 2026-03-09T15:40:30.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:30.235+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6806 osd.0 since back 2026-03-09T15:39:50.074843+0000 front 2026-03-09T15:39:50.075044+0000 (oldest deadline 2026-03-09T15:40:15.374429+0000) 2026-03-09T15:40:30.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:30.235+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6814 osd.1 since back 2026-03-09T15:39:55.374945+0000 front 2026-03-09T15:39:55.374926+0000 (oldest deadline 2026-03-09T15:40:17.074661+0000) 2026-03-09T15:40:30.561 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:30 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7[85545]: 2026-03-09T15:40:30.235+0000 7fb3871d0640 -1 osd.7 133 heartbeat_check: no reply from 192.168.123.105:6822 osd.2 since back 2026-03-09T15:39:59.875887+0000 front 2026-03-09T15:39:59.875999+0000 (oldest deadline 2026-03-09T15:40:22.775465+0000) 2026-03-09T15:40:30.878 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:30 vm09 podman[92860]: 2026-03-09 15:40:30.600611739 +0000 UTC m=+5.054095309 container died 94ef91f7c8456a8f97c95d60b58a09e25dcf343c22e81d170ea5bf48fafe60d9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, CEPH_REF=squid, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, 
org.opencontainers.image.authors=Ceph Release Team , org.label-schema.license=GPLv2, org.label-schema.name=CentOS Stream 9 Base Image, org.label-schema.vendor=CentOS, io.buildah.version=1.41.3, org.opencontainers.image.documentation=https://docs.ceph.com/, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, OSD_FLAVOR=default) 2026-03-09T15:40:30.878 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:30 vm09 podman[92860]: 2026-03-09 15:40:30.627733512 +0000 UTC m=+5.081217093 container remove 94ef91f7c8456a8f97c95d60b58a09e25dcf343c22e81d170ea5bf48fafe60d9 (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.build-date=20260223, org.label-schema.schema-version=1.0, org.label-schema.vendor=CentOS, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, org.opencontainers.image.documentation=https://docs.ceph.com/, ceph=True, org.label-schema.name=CentOS Stream 9 Base Image, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.license=GPLv2, io.buildah.version=1.41.3, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, OSD_FLAVOR=default) 2026-03-09T15:40:30.878 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:30 vm09 bash[92860]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7 2026-03-09T15:40:30.878 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:30 vm09 podman[92940]: 2026-03-09 15:40:30.788233162 +0000 UTC m=+0.020245959 container create 702404b50a3493aab65caa2ff827325ccf7a7392fbac5389323574202f9054ac (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-deactivate, ceph=True, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.opencontainers.image.authors=Ceph Release Team , io.buildah.version=1.41.3, org.label-schema.build-date=20260223, org.label-schema.license=GPLv2, org.label-schema.schema-version=1.0) 2026-03-09T15:40:30.878 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:30 vm09 podman[92940]: 2026-03-09 15:40:30.839802257 +0000 UTC m=+0.071815054 container init 702404b50a3493aab65caa2ff827325ccf7a7392fbac5389323574202f9054ac (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-deactivate, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, org.label-schema.schema-version=1.0, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, OSD_FLAVOR=default, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, org.opencontainers.image.authors=Ceph Release Team , CEPH_REF=squid, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, ceph=True, org.label-schema.vendor=CentOS) 
2026-03-09T15:40:30.878 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:30 vm09 podman[92940]: 2026-03-09 15:40:30.843011894 +0000 UTC m=+0.075024691 container start 702404b50a3493aab65caa2ff827325ccf7a7392fbac5389323574202f9054ac (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-deactivate, org.label-schema.schema-version=1.0, ceph=True, FROM_IMAGE=quay.io/centos/centos:stream9, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.license=GPLv2, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.label-schema.build-date=20260223, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, org.opencontainers.image.authors=Ceph Release Team , CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS) 2026-03-09T15:40:30.878 INFO:journalctl@ceph.osd.7.vm09.stdout:Mar 09 15:40:30 vm09 podman[92940]: 2026-03-09 15:40:30.846913844 +0000 UTC m=+0.078926641 container attach 702404b50a3493aab65caa2ff827325ccf7a7392fbac5389323574202f9054ac (image=quay.ceph.io/ceph-ci/ceph:e911bdebe5c8faa3800735d1568fcdca65db60df, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-osd-7-deactivate, org.label-schema.build-date=20260223, org.label-schema.name=CentOS Stream 9 Base Image, CEPH_REF=squid, org.label-schema.license=GPLv2, org.opencontainers.image.documentation=https://docs.ceph.com/, org.label-schema.vendor=CentOS, FROM_IMAGE=quay.io/centos/centos:stream9, io.buildah.version=1.41.3, OSD_FLAVOR=default, org.opencontainers.image.authors=Ceph Release Team , org.label-schema.schema-version=1.0, ceph=True, CEPH_SHA1=e911bdebe5c8faa3800735d1568fcdca65db60df, GANESHA_REPO_BASEURL=https://buildlogs.centos.org/centos/$releasever-stream/storage/$basearch/nfsganesha-5/, CEPH_GIT_REPO=https://github.com/ceph/ceph-ci.git) 2026-03-09T15:40:31.009 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@osd.7.service' 2026-03-09T15:40:31.085 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:40:31.085 INFO:tasks.cephadm.osd.7:Stopped osd.7 2026-03-09T15:40:31.086 INFO:tasks.cephadm.prometheus.a:Stopping prometheus.a... 2026-03-09T15:40:31.086 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@prometheus.a 2026-03-09T15:40:31.334 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@prometheus.a.service' 2026-03-09T15:40:31.366 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-03-09T15:40:31.366 INFO:tasks.cephadm.prometheus.a:Stopped prometheus.a 2026-03-09T15:40:31.366 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea --force --keep-logs 2026-03-09T15:40:32.906 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:40:32 vm05 systemd[1]: Stopping Ceph node-exporter.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 
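Note the --keep-logs flag on the rm-cluster call above: this first pass removes the remaining daemons on each host (the node-exporter, alertmanager and grafana stops that follow) while leaving /var/log/ceph in place so it can still be archived; a second rm-cluster without the flag runs later, after archiving. A minimal sketch of that two-phase removal using the exact command from the log (rm_cluster is a hypothetical wrapper):

import subprocess

CEPHADM = "/home/ubuntu/cephtest/cephadm"      # path from the log
FSID = "452f6a00-1bcc-11f1-a1ee-7f1a2af01dea"

def rm_cluster(keep_logs: bool) -> None:
    cmd = ["sudo", CEPHADM, "rm-cluster", "--fsid", FSID, "--force"]
    if keep_logs:
        cmd.append("--keep-logs")
    subprocess.run(cmd, check=True)

rm_cluster(keep_logs=True)    # pass 1: remove daemons, keep /var/log/ceph for archiving
# ... crash dumps and logs are archived in between ...
rm_cluster(keep_logs=False)   # pass 2: final cleanup once logs are safely copied off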
2026-03-09T15:40:33.207 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:40:32 vm05 podman[108236]: 2026-03-09 15:40:32.905911812 +0000 UTC m=+0.026276723 container died e730a028339ffc53981bc8c8f675448deb9b6024a2becdb9520129889e99f1bf (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T15:40:33.207 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:40:32 vm05 podman[108236]: 2026-03-09 15:40:32.923663034 +0000 UTC m=+0.044027945 container remove e730a028339ffc53981bc8c8f675448deb9b6024a2becdb9520129889e99f1bf (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a, maintainer=The Prometheus Authors ) 2026-03-09T15:40:33.207 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:40:32 vm05 bash[108236]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-a 2026-03-09T15:40:33.207 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:40:32 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.a.service: Main process exited, code=exited, status=143/n/a 2026-03-09T15:40:33.207 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:40:32 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.a.service: Failed with result 'exit-code'. 2026-03-09T15:40:33.207 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:40:33 vm05 systemd[1]: Stopped Ceph node-exporter.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:40:33.207 INFO:journalctl@ceph.node-exporter.a.vm05.stdout:Mar 09 15:40:33 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.a.service: Consumed 1.262s CPU time. 2026-03-09T15:40:33.207 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:33 vm05 systemd[1]: Stopping Ceph alertmanager.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:40:33.484 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:33 vm05 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a[81945]: ts=2026-03-09T15:40:33.277Z caller=main.go:583 level=info msg="Received SIGTERM, exiting gracefully..." 
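The node-exporter unit is reported as "Failed with result 'exit-code'" with status=143 simply because the process does not trap SIGTERM; 143 follows the usual 128 + signal-number convention (128 + 15, SIGTERM), unlike Alertmanager just below, which shuts down gracefully and is logged as "Deactivated successfully". A small decoder for such statuses (describe_exit is hypothetical):

import signal

def describe_exit(status: int) -> str:
    # systemd's status=143 means 128 + SIGTERM(15): killed, not a real failure
    if status > 128:
        return f"terminated by {signal.Signals(status - 128).name}"
    return f"exited with code {status}"

print(describe_exit(143))   # -> terminated by SIGTERM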
2026-03-09T15:40:33.484 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:33 vm05 podman[108334]: 2026-03-09 15:40:33.288112685 +0000 UTC m=+0.026702281 container died 93224b6bb99a8ed36d55312ab96bebfc743219a6323303487fcd1a0eaf60b67b (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T15:40:33.484 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:33 vm05 podman[108334]: 2026-03-09 15:40:33.300440474 +0000 UTC m=+0.039030060 container remove 93224b6bb99a8ed36d55312ab96bebfc743219a6323303487fcd1a0eaf60b67b (image=quay.io/prometheus/alertmanager:v0.25.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a, maintainer=The Prometheus Authors ) 2026-03-09T15:40:33.484 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:33 vm05 podman[108334]: 2026-03-09 15:40:33.301436634 +0000 UTC m=+0.040026221 volume remove 5502f6f6c0f3c05e563740087d78aaf95bd4c119e4feff12ddfbcd062d5445c7 2026-03-09T15:40:33.484 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:33 vm05 bash[108334]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-alertmanager-a 2026-03-09T15:40:33.484 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:33 vm05 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@alertmanager.a.service: Deactivated successfully. 2026-03-09T15:40:33.484 INFO:journalctl@ceph.alertmanager.a.vm05.stdout:Mar 09 15:40:33 vm05 systemd[1]: Stopped Ceph alertmanager.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:41:04.905 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea --force --keep-logs 2026-03-09T15:41:06.426 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:41:06 vm09 systemd[1]: Stopping Ceph node-exporter.b for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:41:06.426 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:41:06 vm09 podman[93409]: 2026-03-09 15:41:06.315147359 +0000 UTC m=+0.079437410 container died a360ac0679f496154a5c2e8f12313ae81e0f281d83d357e933dee670fc82376d (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T15:41:06.426 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:41:06 vm09 podman[93409]: 2026-03-09 15:41:06.337828973 +0000 UTC m=+0.102119023 container remove a360ac0679f496154a5c2e8f12313ae81e0f281d83d357e933dee670fc82376d (image=quay.io/prometheus/node-exporter:v1.7.0, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b, maintainer=The Prometheus Authors ) 2026-03-09T15:41:06.426 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:41:06 vm09 bash[93409]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-node-exporter-b 2026-03-09T15:41:06.426 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:41:06 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.b.service: Main process exited, code=exited, status=143/n/a 2026-03-09T15:41:06.426 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:41:06 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.b.service: Failed with result 'exit-code'. 2026-03-09T15:41:06.426 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:41:06 vm09 systemd[1]: Stopped Ceph node-exporter.b for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 
2026-03-09T15:41:06.426 INFO:journalctl@ceph.node-exporter.b.vm09.stdout:Mar 09 15:41:06 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@node-exporter.b.service: Consumed 1.220s CPU time. 2026-03-09T15:41:06.759 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:41:06 vm09 systemd[1]: Stopping Ceph grafana.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea... 2026-03-09T15:41:07.035 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:41:06 vm09 ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a[72367]: t=2026-03-09T15:41:06+0000 lvl=info msg="Shutdown started" logger=server reason="System signal: terminated" 2026-03-09T15:41:07.035 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:41:06 vm09 podman[93529]: 2026-03-09 15:41:06.799643662 +0000 UTC m=+0.029963653 container died 6a58314a043eafb8f678557db139e544375686d49a492939138bab881ae891b3 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a, io.openshift.expose-services=, version=8.5, description=Ceph Grafana Container, summary=Grafana Container configured for Ceph mgr/dashboard integration, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, build-date=2022-03-28T10:36:18.413762, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, vendor=Red Hat, Inc., distribution-scope=public, architecture=x86_64, com.redhat.component=ubi8-container, io.k8s.display-name=Red Hat Universal Base Image 8, io.buildah.version=1.24.2, maintainer=Paul Cuzner , io.openshift.tags=base rhel8, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56, name=ubi8, release=236.1648460182, vcs-type=git, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.) 2026-03-09T15:41:07.035 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:41:06 vm09 podman[93529]: 2026-03-09 15:41:06.824221106 +0000 UTC m=+0.054541097 container remove 6a58314a043eafb8f678557db139e544375686d49a492939138bab881ae891b3 (image=quay.io/ceph/ceph-grafana:8.3.5, name=ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a, com.redhat.build-host=cpt-1002.osbs.prod.upshift.rdu2.redhat.com, description=Ceph Grafana Container, io.openshift.tags=base rhel8, maintainer=Paul Cuzner , name=ubi8, release=236.1648460182, summary=Grafana Container configured for Ceph mgr/dashboard integration, com.redhat.component=ubi8-container, com.redhat.license_terms=https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI, distribution-scope=public, io.openshift.expose-services=, io.k8s.description=The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. 
This image is maintained by Red Hat and updated regularly., io.k8s.display-name=Red Hat Universal Base Image 8, io.buildah.version=1.24.2, version=8.5, architecture=x86_64, url=https://access.redhat.com/containers/#/registry.access.redhat.com/ubi8/images/8.5-236.1648460182, vendor=Red Hat, Inc., build-date=2022-03-28T10:36:18.413762, vcs-type=git, vcs-ref=3aadd00326f3dd6cfe65ee31017ab98915fddb56) 2026-03-09T15:41:07.035 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:41:06 vm09 bash[93529]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana-a 2026-03-09T15:41:07.035 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:41:06 vm09 bash[93548]: Error: no container with name or ID "ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea-grafana.a" found: no such container 2026-03-09T15:41:07.035 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:41:06 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@grafana.a.service: Deactivated successfully. 2026-03-09T15:41:07.035 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:41:06 vm09 systemd[1]: Stopped Ceph grafana.a for 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea. 2026-03-09T15:41:07.035 INFO:journalctl@ceph.grafana.a.vm09.stdout:Mar 09 15:41:06 vm09 systemd[1]: ceph-452f6a00-1bcc-11f1-a1ee-7f1a2af01dea@grafana.a.service: Consumed 1.373s CPU time. 2026-03-09T15:41:28.109 DEBUG:teuthology.orchestra.run.vm05:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T15:41:28.137 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-03-09T15:41:28.163 INFO:tasks.cephadm:Archiving crash dumps... 2026-03-09T15:41:28.163 DEBUG:teuthology.misc:Transferring archived files from vm05:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/crash to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/525/remote/vm05/crash 2026-03-09T15:41:28.164 DEBUG:teuthology.orchestra.run.vm05:> sudo tar c -f - -C /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/crash -- . 2026-03-09T15:41:28.204 INFO:teuthology.orchestra.run.vm05.stderr:tar: /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/crash: Cannot open: No such file or directory 2026-03-09T15:41:28.204 INFO:teuthology.orchestra.run.vm05.stderr:tar: Error is not recoverable: exiting now 2026-03-09T15:41:28.205 DEBUG:teuthology.misc:Transferring archived files from vm09:/var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/crash to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/525/remote/vm09/crash 2026-03-09T15:41:28.206 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/crash -- . 2026-03-09T15:41:28.230 INFO:teuthology.orchestra.run.vm09.stderr:tar: /var/lib/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/crash: Cannot open: No such file or directory 2026-03-09T15:41:28.230 INFO:teuthology.orchestra.run.vm09.stderr:tar: Error is not recoverable: exiting now 2026-03-09T15:41:28.231 INFO:tasks.cephadm:Checking cluster log for badness... 2026-03-09T15:41:28.231 DEBUG:teuthology.orchestra.run.vm05:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_STRAY_DAEMON | egrep -v CEPHADM_FAILED_DAEMON | egrep -v CEPHADM_AGENT_DOWN | head -n 1 2026-03-09T15:41:28.280 INFO:tasks.cephadm:Compressing logs... 
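The "badness" check above greps the cluster log for [ERR]/[WRN]/[SEC] entries, keeps only lines matching the log-only-match pattern (CEPHADM_), and filters out each ignorelisted health code. A sketch of how such a pipeline can be assembled from those two lists (badness_command is a hypothetical helper; the patterns are copied from the command in the log):

# Patterns copied from the egrep pipeline above; the helper itself is hypothetical.
LOG = "/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.log"
ONLY_MATCH = ["CEPHADM_"]
IGNORELIST = [
    r"\(MDS_ALL_DOWN\)",
    r"\(MDS_UP_LESS_THAN_MAX\)",
    "CEPHADM_STRAY_DAEMON",
    "CEPHADM_FAILED_DAEMON",
    "CEPHADM_AGENT_DOWN",
]

def badness_command() -> str:
    parts = [f"sudo egrep '\\[ERR\\]|\\[WRN\\]|\\[SEC\\]' {LOG}"]
    parts += [f"egrep {pat}" for pat in ONLY_MATCH]
    parts += [f"egrep -v '{pat}'" for pat in IGNORELIST]
    parts.append("head -n 1")          # any surviving line means "badness"
    return " | ".join(parts)

print(badness_command())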
2026-03-09T15:41:28.281 DEBUG:teuthology.orchestra.run.vm05:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T15:41:28.323 DEBUG:teuthology.orchestra.run.vm09:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-03-09T15:41:28.348 INFO:teuthology.orchestra.run.vm05.stderr:find: gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-09T15:41:28.348 INFO:teuthology.orchestra.run.vm05.stderr:‘/var/log/rbd-target-api’: No such file or directory 2026-03-09T15:41:28.348 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mon.a.log 2026-03-09T15:41:28.349 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.log 2026-03-09T15:41:28.350 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-03-09T15:41:28.370 INFO:teuthology.orchestra.run.vm09.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-03-09T15:41:28.371 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mon.a.log: 91.1% -- replaced with /var/log/ceph/cephadm.log.gz 2026-03-09T15:41:28.371 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mgr.y.log 2026-03-09T15:41:28.371 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.log: 92.7% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.log.gz 2026-03-09T15:41:28.371 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.audit.log 2026-03-09T15:41:28.371 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mgr.y.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.cephadm.log 2026-03-09T15:41:28.371 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-volume.log 2026-03-09T15:41:28.371 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/cephadm.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mon.b.log 2026-03-09T15:41:28.371 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-volume.log: 93.6% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-volume.log.gz 2026-03-09T15:41:28.371 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.cephadm.log 2026-03-09T15:41:28.371 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mon.b.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.audit.log 2026-03-09T15:41:28.371 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.cephadm.log: 83.0% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.cephadm.log.gz 2026-03-09T15:41:28.372 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.log 2026-03-09T15:41:28.372 
INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.audit.log: 90.7% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.audit.log.gz
2026-03-09T15:41:28.372 INFO:teuthology.orchestra.run.vm09.stderr: 90.9% -- replaced with /var/log/ceph/cephadm.log.gz
2026-03-09T15:41:28.372 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mgr.x.log
2026-03-09T15:41:28.372 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.log: 87.0% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.log.gz
2026-03-09T15:41:28.372 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.4.log
2026-03-09T15:41:28.372 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.audit.log: 94.3% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.audit.log.gz
2026-03-09T15:41:28.372 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-volume.log
2026-03-09T15:41:28.373 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.cephadm.log: 90.5% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph.cephadm.log.gz
2026-03-09T15:41:28.374 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mon.c.log
2026-03-09T15:41:28.375 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mgr.x.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.5.log
2026-03-09T15:41:28.382 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.4.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.6.log
2026-03-09T15:41:28.384 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.0.log
2026-03-09T15:41:28.393 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.5.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.7.log
2026-03-09T15:41:28.394 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mon.c.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.1.log
2026-03-09T15:41:28.394 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.0.log: 93.7% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-volume.log.gz
2026-03-09T15:41:28.400 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.6.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-client.rgw.foo.vm09.aljafu.log
2026-03-09T15:41:28.404 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.2.log
2026-03-09T15:41:28.411 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.7.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-client.rgw.smpl.vm09.mkjxeh.log
2026-03-09T15:41:28.411 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-client.rgw.foo.vm09.aljafu.log: 75.8% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-client.rgw.foo.vm09.aljafu.log.gz
2026-03-09T15:41:28.416 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.1.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.3.log
2026-03-09T15:41:28.423 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-client.rgw.smpl.vm09.mkjxeh.log: 75.8% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-client.rgw.smpl.vm09.mkjxeh.log.gz
2026-03-09T15:41:28.425 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.2.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-client.rgw.foo.vm05.tiuqos.log
2026-03-09T15:41:28.432 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.3.log: gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-client.rgw.smpl.vm05.grnlph.log
2026-03-09T15:41:28.433 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-client.rgw.foo.vm05.tiuqos.log: 77.0% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-client.rgw.foo.vm05.tiuqos.log.gz
2026-03-09T15:41:28.442 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/tcmu-runner.log
2026-03-09T15:41:28.444 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-client.rgw.smpl.vm05.grnlph.log: 75.7% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-client.rgw.smpl.vm05.grnlph.log.gz
2026-03-09T15:41:28.445 INFO:teuthology.orchestra.run.vm09.stderr: 89.9% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mgr.x.log.gz
2026-03-09T15:41:28.458 INFO:teuthology.orchestra.run.vm05.stderr:/var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/tcmu-runner.log: 84.5% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/tcmu-runner.log.gz
2026-03-09T15:41:28.785 INFO:teuthology.orchestra.run.vm05.stderr: 89.2% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mgr.y.log.gz
2026-03-09T15:41:28.833 INFO:teuthology.orchestra.run.vm09.stderr: 92.2% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mon.b.log.gz
2026-03-09T15:41:28.872 INFO:teuthology.orchestra.run.vm05.stderr: 92.3% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mon.c.log.gz
2026-03-09T15:41:29.484 INFO:teuthology.orchestra.run.vm05.stderr: 91.2% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-mon.a.log.gz
2026-03-09T15:41:29.571 INFO:teuthology.orchestra.run.vm09.stderr: 93.8% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.6.log.gz
2026-03-09T15:41:29.626 INFO:teuthology.orchestra.run.vm05.stderr: 93.9% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.2.log.gz
2026-03-09T15:41:29.797 INFO:teuthology.orchestra.run.vm09.stderr: 94.3% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.7.log.gz
2026-03-09T15:41:29.853 INFO:teuthology.orchestra.run.vm09.stderr: 93.8% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.5.log.gz
2026-03-09T15:41:29.916 INFO:teuthology.orchestra.run.vm09.stderr: 94.0% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.4.log.gz
2026-03-09T15:41:29.917 INFO:teuthology.orchestra.run.vm05.stderr: 93.8% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.0.log.gz
2026-03-09T15:41:29.918 INFO:teuthology.orchestra.run.vm09.stderr:
2026-03-09T15:41:29.918 INFO:teuthology.orchestra.run.vm09.stderr:real 0m1.578s
2026-03-09T15:41:29.918 INFO:teuthology.orchestra.run.vm09.stderr:user 0m2.914s
2026-03-09T15:41:29.918 INFO:teuthology.orchestra.run.vm09.stderr:sys 0m0.154s
2026-03-09T15:41:30.028 INFO:teuthology.orchestra.run.vm05.stderr: 93.9% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.1.log.gz
2026-03-09T15:41:30.142 INFO:teuthology.orchestra.run.vm05.stderr: 93.8% -- replaced with /var/log/ceph/452f6a00-1bcc-11f1-a1ee-7f1a2af01dea/ceph-osd.3.log.gz
2026-03-09T15:41:30.143 INFO:teuthology.orchestra.run.vm05.stderr:
2026-03-09T15:41:30.143 INFO:teuthology.orchestra.run.vm05.stderr:real 0m1.805s
2026-03-09T15:41:30.143 INFO:teuthology.orchestra.run.vm05.stderr:user 0m3.296s
2026-03-09T15:41:30.143 INFO:teuthology.orchestra.run.vm05.stderr:sys 0m0.185s
2026-03-09T15:41:30.144 INFO:tasks.cephadm:Archiving logs...
2026-03-09T15:41:30.144 DEBUG:teuthology.misc:Transferring archived files from vm05:/var/log/ceph to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/525/remote/vm05/log
2026-03-09T15:41:30.144 DEBUG:teuthology.orchestra.run.vm05:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-09T15:41:30.385 DEBUG:teuthology.misc:Transferring archived files from vm09:/var/log/ceph to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/525/remote/vm09/log
2026-03-09T15:41:30.385 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /var/log/ceph -- .
2026-03-09T15:41:30.607 INFO:tasks.cephadm:Removing cluster...
2026-03-09T15:41:30.607 DEBUG:teuthology.orchestra.run.vm05:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea --force
2026-03-09T15:41:30.836 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 452f6a00-1bcc-11f1-a1ee-7f1a2af01dea --force
2026-03-09T15:41:31.064 INFO:tasks.cephadm:Removing cephadm ...
2026-03-09T15:41:31.064 DEBUG:teuthology.orchestra.run.vm05:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-09T15:41:31.080 DEBUG:teuthology.orchestra.run.vm09:> rm -rf /home/ubuntu/cephtest/cephadm
2026-03-09T15:41:31.101 INFO:tasks.cephadm:Teardown complete
2026-03-09T15:41:31.101 DEBUG:teuthology.run_tasks:Unwinding manager clock
2026-03-09T15:41:31.104 INFO:teuthology.task.clock:Checking final clock skew...
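For reference, the cephadm teardown sequence logged above can be replayed by hand on a test node. This is a minimal sketch only, assuming this run's fsid and the /home/ubuntu/cephtest layout; it mirrors the shell commands shown in the log, not the tasks.cephadm implementation itself.

    #!/usr/bin/env bash
    # Assumption: fsid and paths are the ones from this job's log; adjust per node.
    FSID=452f6a00-1bcc-11f1-a1ee-7f1a2af01dea

    # 1. Stream the daemon logs off the node (teuthology pipes this tar into its archive dir).
    sudo tar c -f - -C /var/log/ceph -- . > ceph-logs.tar

    # 2. Remove every daemon and all cluster state for this fsid.
    sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid "$FSID" --force

    # 3. Drop the cephadm binary that was staged for the test.
    rm -rf /home/ubuntu/cephtest/cephadm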
2026-03-09T15:41:31.104 DEBUG:teuthology.orchestra.run.vm05:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T15:41:31.123 DEBUG:teuthology.orchestra.run.vm09:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-03-09T15:41:31.136 INFO:teuthology.orchestra.run.vm05.stderr:bash: line 1: ntpq: command not found
2026-03-09T15:41:31.151 INFO:teuthology.orchestra.run.vm05.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-09T15:41:31.151 INFO:teuthology.orchestra.run.vm05.stdout:===============================================================================
2026-03-09T15:41:31.151 INFO:teuthology.orchestra.run.vm05.stdout:^+ basilisk.mybb.de 2 6 377 22 +2705us[+4762us] +/- 18ms
2026-03-09T15:41:31.151 INFO:teuthology.orchestra.run.vm05.stdout:^+ s7.vonderste.in 2 6 377 21 +4997us[+4997us] +/- 19ms
2026-03-09T15:41:31.151 INFO:teuthology.orchestra.run.vm05.stdout:^+ www.kernfusion.at 2 6 377 22 -3609us[-1551us] +/- 32ms
2026-03-09T15:41:31.151 INFO:teuthology.orchestra.run.vm05.stdout:^* mail.klausen.dk 2 6 377 21 +492us[+2549us] +/- 17ms
2026-03-09T15:41:31.161 INFO:teuthology.orchestra.run.vm09.stderr:bash: line 1: ntpq: command not found
2026-03-09T15:41:31.164 INFO:teuthology.orchestra.run.vm09.stdout:MS Name/IP address Stratum Poll Reach LastRx Last sample
2026-03-09T15:41:31.164 INFO:teuthology.orchestra.run.vm09.stdout:===============================================================================
2026-03-09T15:41:31.164 INFO:teuthology.orchestra.run.vm09.stdout:^+ basilisk.mybb.de 2 6 377 20 +2568us[+2568us] +/- 18ms
2026-03-09T15:41:31.164 INFO:teuthology.orchestra.run.vm09.stdout:^+ s7.vonderste.in 2 6 377 20 +4777us[+4777us] +/- 19ms
2026-03-09T15:41:31.164 INFO:teuthology.orchestra.run.vm09.stdout:^+ www.kernfusion.at 2 6 377 21 -3813us[-3813us] +/- 32ms
2026-03-09T15:41:31.164 INFO:teuthology.orchestra.run.vm09.stdout:^* mail.klausen.dk 2 6 377 22 +510us[+2435us] +/- 17ms
2026-03-09T15:41:31.165 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab
2026-03-09T15:41:31.167 INFO:teuthology.task.ansible:Skipping ansible cleanup...
2026-03-09T15:41:31.167 DEBUG:teuthology.run_tasks:Unwinding manager selinux
2026-03-09T15:41:31.169 DEBUG:teuthology.run_tasks:Unwinding manager pcp
2026-03-09T15:41:31.171 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer
2026-03-09T15:41:31.174 INFO:teuthology.task.internal:Duration was 1058.832724 seconds
2026-03-09T15:41:31.174 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog
2026-03-09T15:41:31.177 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring...
2026-03-09T15:41:31.177 DEBUG:teuthology.orchestra.run.vm05:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-09T15:41:31.194 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart
2026-03-09T15:41:31.239 INFO:teuthology.orchestra.run.vm05.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T15:41:31.254 INFO:teuthology.orchestra.run.vm09.stderr:Redirecting to /bin/systemctl restart rsyslog.service
2026-03-09T15:41:31.685 INFO:teuthology.task.internal.syslog:Checking logs for errors...
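The "ntpq: command not found" followed by chronyc output is expected here: the clock check tries the classic ntpd client first and falls back to chronyc, and the trailing "|| true" keeps the check advisory so it can never fail the job. A minimal sketch of the same fallback, assuming chrony is the active time daemon as on these CentOS 9 VMs (in the chronyc table, "^*" marks the currently selected source and "^+" an acceptable combined source):

    # Report time-sync status without failing if neither tool is present.
    PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true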
2026-03-09T15:41:31.686 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm05.local
2026-03-09T15:41:31.686 DEBUG:teuthology.orchestra.run.vm05:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-09T15:41:31.714 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm09.local
2026-03-09T15:41:31.714 DEBUG:teuthology.orchestra.run.vm09:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1
2026-03-09T15:41:31.744 INFO:teuthology.task.internal.syslog:Gathering journactl...
2026-03-09T15:41:31.744 DEBUG:teuthology.orchestra.run.vm05:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T15:41:31.756 DEBUG:teuthology.orchestra.run.vm09:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T15:41:32.398 INFO:teuthology.task.internal.syslog:Compressing syslogs...
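The long grep pipeline above is the syslog task's kernel-log triage: keep lines containing BUG, INFO, or DEADLOCK, strip a list of known-benign patterns, and report only the first survivor; an empty result means the check passes. A trimmed sketch of the same idea, assuming a local copy of kern.log and keeping only a few of the exclusions (the full list is in the command as logged):

    # Surface the first suspicious kernel message, ignoring a few known-noise patterns.
    grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' kern.log \
      | grep -v 'lockdep is turned off' \
      | grep -v CRON \
      | grep -v ceph-crash \
      | head -n 1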
2026-03-09T15:41:32.398 DEBUG:teuthology.orchestra.run.vm05:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-09T15:41:32.400 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --
2026-03-09T15:41:32.425 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T15:41:32.425 INFO:teuthology.orchestra.run.vm05.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T15:41:32.426 INFO:teuthology.orchestra.run.vm05.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T15:41:32.426 INFO:teuthology.orchestra.run.vm05.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-09T15:41:32.426 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log
2026-03-09T15:41:32.426 INFO:teuthology.orchestra.run.vm05.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-09T15:41:32.427 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log
2026-03-09T15:41:32.427 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log
2026-03-09T15:41:32.427 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz
2026-03-09T15:41:32.427 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz
2026-03-09T15:41:32.578 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 97.8% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-09T15:41:32.628 INFO:teuthology.orchestra.run.vm05.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 97.2% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz
2026-03-09T15:41:32.630 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo
2026-03-09T15:41:32.632 INFO:teuthology.task.internal:Restoring /etc/sudoers...
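Compression is parallelised per file: find emits NUL-separated paths and xargs launches one gzip per file with --max-procs=0 (as many processes at once as possible), which is also why the gzip progress messages interleave in the stderr above. A sketch of the same pattern, assuming any directory of *.log files:

    # Compress every .log under $DIR in parallel, one gzip invocation per file.
    DIR=/home/ubuntu/cephtest/archive/syslog
    find "$DIR" -name '*.log' -print0 \
      | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose --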
2026-03-09T15:41:32.632 DEBUG:teuthology.orchestra.run.vm05:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-09T15:41:32.695 DEBUG:teuthology.orchestra.run.vm09:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-03-09T15:41:32.721 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-03-09T15:41:32.724 DEBUG:teuthology.orchestra.run.vm05:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-09T15:41:32.737 DEBUG:teuthology.orchestra.run.vm09:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-03-09T15:41:32.765 INFO:teuthology.orchestra.run.vm05.stdout:kernel.core_pattern = core
2026-03-09T15:41:32.786 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = core
2026-03-09T15:41:32.802 DEBUG:teuthology.orchestra.run.vm05:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-09T15:41:32.835 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T15:41:32.835 DEBUG:teuthology.orchestra.run.vm09:> test -e /home/ubuntu/cephtest/archive/coredump
2026-03-09T15:41:32.857 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-03-09T15:41:32.857 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-03-09T15:41:32.860 INFO:teuthology.task.internal:Transferring archived files...
2026-03-09T15:41:32.860 DEBUG:teuthology.misc:Transferring archived files from vm05:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/525/remote/vm05
2026-03-09T15:41:32.860 DEBUG:teuthology.orchestra.run.vm05:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-09T15:41:32.908 DEBUG:teuthology.misc:Transferring archived files from vm09:/home/ubuntu/cephtest/archive to /archive/kyr-2026-03-09_11:23:05-orch-squid-none-default-vps/525/remote/vm09
2026-03-09T15:41:32.908 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-03-09T15:41:32.939 INFO:teuthology.task.internal:Removing archive directory...
2026-03-09T15:41:32.939 DEBUG:teuthology.orchestra.run.vm05:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-09T15:41:32.948 DEBUG:teuthology.orchestra.run.vm09:> rm -rf -- /home/ubuntu/cephtest/archive
2026-03-09T15:41:32.995 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-03-09T15:41:32.999 INFO:teuthology.task.internal:Not uploading archives.
2026-03-09T15:41:32.999 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-03-09T15:41:33.001 INFO:teuthology.task.internal:Tidying up after the test...
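The coredump unwind above resets kernel.core_pattern to the default, deletes cores that came from systemd-sysusers (treated as noise), removes the directory only if it ended up empty, and then uses "test -e" (exit status 1 on both hosts) to confirm that no cores survived. A sketch of the same cleanup, assuming the archive layout used in this run; the find/-exec form is a simplification of the logged for-loop:

    # Path where this run collected coredumps.
    CORES=/home/ubuntu/cephtest/archive/coredump

    # Reset core_pattern to the default now that the test is over.
    sudo sysctl -w kernel.core_pattern=core

    # Discard cores produced by systemd-sysusers; keep anything else for analysis.
    sudo find "$CORES" -type f -exec sh -c 'file "$1" | grep -q systemd-sysusers && rm -f "$1"' _ {} \;

    # Remove the directory only if it is empty; if it still exists, real cores were captured.
    rmdir --ignore-fail-on-non-empty -- "$CORES"
    if test -e "$CORES"; then echo "coredumps collected"; fi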
2026-03-09T15:41:33.001 DEBUG:teuthology.orchestra.run.vm05:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-09T15:41:33.003 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-03-09T15:41:33.018 INFO:teuthology.orchestra.run.vm05.stdout: 8532139 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 9 15:41 /home/ubuntu/cephtest
2026-03-09T15:41:33.052 INFO:teuthology.orchestra.run.vm09.stdout: 8532145 0 drwxr-xr-x 2 ubuntu ubuntu 6 Mar 9 15:41 /home/ubuntu/cephtest
2026-03-09T15:41:33.053 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-03-09T15:41:33.058 INFO:teuthology.run:Summary data:
description: orch/cephadm/upgrade/{1-start-distro/1-start-centos_9.stream 2-repo_digest/repo_digest 3-upgrade/simple 4-wait 5-upgrade-ls agent/off mon_election/classic}
duration: 1058.8327243328094
owner: kyr
success: true
2026-03-09T15:41:33.058 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-03-09T15:41:33.077 INFO:teuthology.run:pass
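The run closes with the summary that gets pushed to the results server (duration roughly 1059 s, owner kyr, success true) followed by the final "pass" line. When scanning many such logs, a quick way to pull that verdict out of the plain-text log is a grep plus tail; this is only a convenience sketch against a saved log file, not a teuthology interface:

    # Print the summary block header and the final pass/fail line from a teuthology log.
    LOG=teuthology.log   # hypothetical local copy of this job's log
    grep -n 'INFO:teuthology.run:Summary data:' "$LOG"
    tail -n 1 "$LOG"     # the last line carries the overall verdict ('pass' here)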